def __init__(self, array_id, rest_client):
    """Initialise with an array serial number and a REST client.

    :param array_id: the array serial number -- str
    :param rest_client: REST client used for Unisphere calls
    """
    helpers = CommonFunctions(rest_client)
    self.common = helpers
    # Expose the shared CRUD helpers directly on this instance so
    # methods can call self.get_resource(...) etc. without indirection.
    self.get_resource = helpers.get_resource
    self.create_resource = helpers.create_resource
    self.modify_resource = helpers.modify_resource
    self.delete_resource = helpers.delete_resource
    self.array_id = array_id
def __init__(self, array_id, rest_client):
    """__init__."""
    self.array_id = array_id
    # Provisioning calls are needed by this module's operations.
    self.provisioning = ProvisioningFunctions(array_id, rest_client)
    helpers = CommonFunctions(rest_client)
    self.common = helpers
    # Shortcut aliases for the shared CRUD operations.
    self.get_resource = helpers.get_resource
    self.create_resource = helpers.create_resource
    self.modify_resource = helpers.modify_resource
    self.delete_resource = helpers.delete_resource
def __init__(self, username=None, password=None, server_ip=None,
             port=None, verify=None,
             u4v_version=constants.UNIVMAX_VERSION,
             interval=5, retries=200, array_id=None):
    """Initialise the legacy Unisphere connection.

    Credentials and connection details fall back to the configuration
    file (CFG) when not passed explicitly.

    :param username: Unisphere username -- str
    :param password: Unisphere password -- str
    :param server_ip: Unisphere server IP address -- str
    :param port: Unisphere port -- str/int
    :param verify: SSL verification flag or CA bundle path
    :param u4v_version: Unisphere REST API version -- str
    :param interval: job-polling interval in seconds -- int
    :param retries: number of REST retries -- int
    :param array_id: the array serial number -- str
    """
    self.end_date = int(round(time.time() * 1000))
    # Default window is the previous hour, in milliseconds.
    self.start_date = self.end_date - 3600000
    self.array_id = array_id
    if not self.array_id:
        try:
            self.array_id = CFG.get('setup', 'array')
        except Exception:
            LOG.warning("No array id specified. Please set array ID "
                        "using the 'set_array_id(array_id)' function.")
    if CFG is not None:
        # Explicit arguments win; otherwise read from the config file.
        username = username or CFG.get('setup', 'username')
        password = password or CFG.get('setup', 'password')
        server_ip = server_ip or CFG.get('setup', 'server_ip')
        port = port or CFG.get('setup', 'port')
    if verify is None:
        try:
            raw_verify = CFG.get('setup', 'verify')
            lowered = raw_verify.lower()
            if lowered == 'false':
                verify = False
            elif lowered == 'true':
                verify = True
            else:
                # Any other value (e.g. a CA bundle path) is kept as-is.
                verify = raw_verify
        except Exception:
            verify = True
    base_url = f"https://{server_ip}:{port}/univmax/restapi"
    self.rest_client = RestRequests(username, password, verify, base_url)
    self.request = self.rest_client.rest_request
    self.U4V_VERSION = u4v_version
    self.common = CommonFunctions(
        self.request, interval, retries, u4v_version)
    self.provisioning = ProvisioningFunctions(
        self.array_id, self.request, self.common, self.U4V_VERSION)
    self.performance = PerformanceFunctions(
        self.array_id, self.request, self.common, self.provisioning,
        self.U4V_VERSION)
    self.replication = ReplicationFunctions(
        self.array_id, self.request, self.common, self.provisioning,
        self.U4V_VERSION)
    self.migration = MigrationFunctions(
        self.array_id, self.request, self.common, self.U4V_VERSION)
class WLPFunctions(object):
    """Workload Planner (WLP) REST calls."""

    def __init__(self, array_id, rest_client):
        """__init__."""
        self.common = CommonFunctions(rest_client)
        self.array_id = array_id

    def get_wlp_information(self, array_id):
        """Get the latest timestamp from WLP for processing New Workloads.

        :param array_id: array id -- str
        :returns: wlp details -- dict
        """
        wlp_details = self.common.get_resource(
            category=WLP, resource_level=SYMMETRIX,
            resource_level_id=array_id)
        return wlp_details if wlp_details else dict()

    def get_headroom(self, array_id, workload=None, srp=None, slo=None):
        """Get the Remaining Headroom Capacity.

        Get the headroom capacity for a given srp/ slo/ workload
        combination.

        :param array_id: array id -- str
        :param workload: the workload type -- str
        :param srp: storage resource pool id -- str
        :param slo: service level id -- str
        :returns: headroom details -- dict
        """
        # Only forward the filters that were actually supplied.
        params = {key: value for key, value in
                  ((SRP, srp), (SLO, slo), (WORKLOADTYPE, workload))
                  if value}
        headroom = self.common.get_resource(
            category=WLP, resource_level=SYMMETRIX,
            resource_level_id=array_id, resource_type=HEADROOM,
            params=params)
        if not headroom:
            return list()
        return headroom.get(GB_HEADROOM, list())
def __init__(self, username=None, password=None, server_ip=None,
             port=None, verify=None,
             u4v_version=constants.UNISPHERE_VERSION,
             interval=5, retries=200, array_id=None,
             application_type=None, remote_array=None,
             remote_array_2=None):
    """Initialise a Unisphere REST connection and its function modules.

    :param username: Unisphere username -- str
    :param password: Unisphere password -- str
    :param server_ip: Unisphere server IP address -- str
    :param port: Unisphere port -- str/int
    :param verify: SSL verification flag or CA bundle path
    :param u4v_version: Unisphere REST API version -- str
    :param interval: job-polling interval in seconds -- int
    :param retries: number of REST retries -- int
    :param array_id: the array serial number -- str
    :param application_type: application identifier for REST headers -- str
    :param remote_array: SRDF remote array serial number -- str
    :param remote_array_2: second SRDF remote array serial number -- str
    :raises MissingConfigurationException: if credentials or server
        details cannot be resolved from arguments or configuration
    """
    config = config_handler.set_logger_and_config(file_path)
    self.end_date = int(round(time.time() * 1000))
    # Default performance window: the previous hour, in milliseconds.
    self.start_date = (self.end_date - 3600000)
    # Set array ID
    self.array_id = array_id
    if not self.array_id:
        try:
            self.array_id = config.get(SETUP, ARRAY)
        except Exception:
            LOG.warning(
                'No array id specified. Please set array ID using '
                'U4VConn.set_array_id(array_id).')
    # Fix: caller-supplied remote arrays were previously never stored on
    # the instance, and self.remote_array could be left unset entirely
    # when the config file defined the option. Explicit arguments now
    # take precedence, config values are the fallback, and both
    # attributes are always initialised.
    self.remote_array = remote_array
    self.remote_array_2 = remote_array_2
    # Set environment config
    if config is not None:
        if not username:
            username = config.get(SETUP, USERNAME)
        if not password:
            password = config.get(SETUP, PASSWORD)
        if not server_ip:
            server_ip = config.get(SETUP, SERVER_IP)
        if not port:
            port = config.get(SETUP, PORT)
        # Optional Parameters for SRDF Remote array configurations
        if not self.remote_array and config.has_option(SETUP, R_ARRAY):
            self.remote_array = config.get(SETUP, R_ARRAY)
        if not self.remote_array_2 and config.has_option(
                SETUP, R_ARRAY_2):
            self.remote_array_2 = config.get(SETUP, R_ARRAY_2)
    # Set verification
    if verify is None:
        try:
            verify = config.get(SETUP, VERIFY)
            if verify.lower() == 'false':
                verify = False
            elif verify.lower() == 'true':
                verify = True
        except Exception:
            verify = True
    if None in [username, password, server_ip, port]:
        raise exception.MissingConfigurationException
    # Initialise REST session
    base_url = 'https://{server_ip}:{port}/univmax/restapi'.format(
        server_ip=server_ip, port=port)
    self.rest_client = RestRequests(
        username, password, verify, base_url, interval, retries,
        application_type)
    self.request = self.rest_client.rest_request
    self.common = CommonFunctions(self.rest_client)
    self.provisioning = ProvisioningFunctions(self.array_id,
                                              self.rest_client)
    self.performance = PerformanceFunctions(self.array_id,
                                            self.rest_client)
    self.replication = ReplicationFunctions(self.array_id,
                                            self.rest_client)
    self.metro_dr = MetroDRFunctions(self.array_id, self.rest_client)
    self.migration = MigrationFunctions(self.array_id, self.rest_client)
    self.wlp = WLPFunctions(self.array_id, self.rest_client)
    self.snapshot_policy = SnapshotPolicyFunctions(self.array_id,
                                                   self.rest_client)
    self.system = SystemFunctions(self.array_id, self.rest_client)
    self.validate_unisphere()
class U4VConn(object):
    """U4VConn."""

    def __init__(self, username=None, password=None, server_ip=None,
                 port=None, verify=None,
                 u4v_version=constants.UNISPHERE_VERSION,
                 interval=5, retries=200, array_id=None,
                 application_type=None, remote_array=None,
                 remote_array_2=None):
        """Initialise a Unisphere REST connection and function modules.

        :param username: Unisphere username -- str
        :param password: Unisphere password -- str
        :param server_ip: Unisphere server IP address -- str
        :param port: Unisphere port -- str/int
        :param verify: SSL verification flag or CA bundle path
        :param u4v_version: Unisphere REST API version -- str
        :param interval: job-polling interval in seconds -- int
        :param retries: number of REST retries -- int
        :param array_id: the array serial number -- str
        :param application_type: application id for REST headers -- str
        :param remote_array: SRDF remote array serial number -- str
        :param remote_array_2: second SRDF remote array serial -- str
        :raises MissingConfigurationException: if credentials or server
            details cannot be resolved from arguments or configuration
        """
        config = config_handler.set_logger_and_config(file_path)
        self.end_date = int(round(time.time() * 1000))
        # Default performance window: the previous hour, in milliseconds.
        self.start_date = (self.end_date - 3600000)
        # Set array ID
        self.array_id = array_id
        if not self.array_id:
            try:
                self.array_id = config.get(SETUP, ARRAY)
            except Exception:
                LOG.warning(
                    'No array id specified. Please set array ID using '
                    'U4VConn.set_array_id(array_id).')
        # Fix: caller-supplied remote arrays were previously never stored
        # on the instance, and self.remote_array could be left unset when
        # the config file defined the option. Explicit arguments now take
        # precedence, config values are the fallback, and both attributes
        # are always initialised.
        self.remote_array = remote_array
        self.remote_array_2 = remote_array_2
        # Set environment config
        if config is not None:
            if not username:
                username = config.get(SETUP, USERNAME)
            if not password:
                password = config.get(SETUP, PASSWORD)
            if not server_ip:
                server_ip = config.get(SETUP, SERVER_IP)
            if not port:
                port = config.get(SETUP, PORT)
            # Optional Parameters for SRDF Remote array configurations
            if not self.remote_array and config.has_option(SETUP, R_ARRAY):
                self.remote_array = config.get(SETUP, R_ARRAY)
            if not self.remote_array_2 and config.has_option(
                    SETUP, R_ARRAY_2):
                self.remote_array_2 = config.get(SETUP, R_ARRAY_2)
        # Set verification
        if verify is None:
            try:
                verify = config.get(SETUP, VERIFY)
                if verify.lower() == 'false':
                    verify = False
                elif verify.lower() == 'true':
                    verify = True
            except Exception:
                verify = True
        if None in [username, password, server_ip, port]:
            raise exception.MissingConfigurationException
        # Initialise REST session
        base_url = 'https://{server_ip}:{port}/univmax/restapi'.format(
            server_ip=server_ip, port=port)
        self.rest_client = RestRequests(
            username, password, verify, base_url, interval, retries,
            application_type)
        self.request = self.rest_client.rest_request
        self.common = CommonFunctions(self.rest_client)
        self.provisioning = ProvisioningFunctions(self.array_id,
                                                  self.rest_client)
        self.performance = PerformanceFunctions(self.array_id,
                                                self.rest_client)
        self.replication = ReplicationFunctions(self.array_id,
                                                self.rest_client)
        self.metro_dr = MetroDRFunctions(self.array_id, self.rest_client)
        self.migration = MigrationFunctions(self.array_id, self.rest_client)
        self.wlp = WLPFunctions(self.array_id, self.rest_client)
        self.snapshot_policy = SnapshotPolicyFunctions(self.array_id,
                                                       self.rest_client)
        self.system = SystemFunctions(self.array_id, self.rest_client)
        self.validate_unisphere()

    def close_session(self):
        """Close the current rest session."""
        self.rest_client.close_session()

    def set_requests_timeout(self, timeout_value):
        """Set the requests timeout.

        :param timeout_value: the new timeout value -- int
        """
        self.rest_client.timeout = timeout_value

    def set_array_id(self, array_id):
        """Set the array serial number.

        :param array_id: the array serial number -- str
        """
        self.array_id = array_id
        self.performance.array_id = array_id
        self.provisioning.array_id = array_id
        self.replication.array_id = array_id
        # Fix: metro_dr and snapshot_policy previously kept the old array
        # id after a switch, so their calls still targeted the old array.
        self.metro_dr.array_id = array_id
        self.migration.array_id = array_id
        self.snapshot_policy.array_id = array_id
        self.wlp.array_id = array_id
        self.system.array_id = array_id

    def validate_unisphere(self):
        """Check that the minimum version of Unisphere is in-use.

        If the version of Unisphere used does not meet minimum
        requirements the application will exit gracefully.

        :raises: SystemExit
        """
        uni_ver, major_ver = self.common.get_uni_version()
        if int(major_ver) < int(constants.UNISPHERE_VERSION):
            msg = ('Unisphere version {uv} does not meet the minimum '
                   'requirement of v9.2.0.x Please upgrade your version of '
                   'Unisphere to use this SDK. Exiting...'.format(uv=uni_ver))
            sys.exit(msg)
        else:
            LOG.debug('Unisphere version {uv} passes minimum requirement '
                      'check.'.format(uv=uni_ver))
def __init__(self, array_id, rest_client):
    """Store the array serial number and the shared common functions."""
    self.array_id = array_id
    self.common = CommonFunctions(rest_client)
class SnapshotPolicyFunctions(object):
    """SnapshotPolicyFunctions."""

    def __init__(self, array_id, rest_client):
        """__init__."""
        self.common = CommonFunctions(rest_client)
        # Shortcut aliases for the shared CRUD operations.
        self.get_resource = self.common.get_resource
        self.create_resource = self.common.create_resource
        self.modify_resource = self.common.modify_resource
        self.delete_resource = self.common.delete_resource
        self.array_id = array_id

    def get_snapshot_policy_list(self):
        """Get the list of snapshot policy names on this array.

        :returns: snapshot policy names -- list
        """
        response = self.get_resource(category=REPLICATION,
                                     resource_level=SYMMETRIX,
                                     resource_level_id=self.array_id,
                                     resource_type=SNAPSHOT_POLICY)
        return response.get('name', list()) if response else list()

    def get_snapshot_policy(self, snapshot_policy_name):
        """Given a snapshot policy name, return snapshot policy details.

        :param snapshot_policy_name: name of the snapshot policy -- str
        :returns: snapshot policy details -- dict
        """
        return self.get_resource(category=REPLICATION,
                                 resource_level=SYMMETRIX,
                                 resource_level_id=self.array_id,
                                 resource_type=SNAPSHOT_POLICY,
                                 resource_type_id=snapshot_policy_name)

    def get_snapshot_policy_storage_group_list(self, snapshot_policy_name):
        """Get list of storage groups associated to specified snapshot policy.

        :param snapshot_policy_name: name of the snapshot policy -- str
        :returns: storage group names -- list
        """
        response = self.get_resource(category=REPLICATION,
                                     resource_level=SYMMETRIX,
                                     resource_level_id=self.array_id,
                                     resource_type=SNAPSHOT_POLICY,
                                     resource_type_id=snapshot_policy_name,
                                     object_type=STORAGEGROUP)
        return response.get('name', list()) if response else list()

    def create_snapshot_policy(self, snapshot_policy_name, interval,
                               cloud_retention_days=None,
                               cloud_provider_name=None,
                               local_snapshot_policy_secure=False,
                               local_snapshot_policy_snapshot_count=None,
                               offset_mins=None,
                               compliance_count_warning=None,
                               compliance_count_critical=None, _async=False):
        """Create a new snapshot policy.

        :param snapshot_policy_name: the snapshot policy name -- str
        :param interval: The value of the interval counter for snapshot
            policy execution. Must be one of '10 Minutes', '12 Minutes',
            '15 Minutes', '20 Minutes', '30 Minutes', '1 Hour',
            '2 Hours', '3 Hours', '4 Hours', '6 Hours', '8 Hours',
            '12 Hours', '1 Day', '7 Days' -- str
        :param cloud_retention_days: part of cloud_snapshot_policy_details
            number of days to retain the policy -- int
        :param cloud_provider_name: part of cloud_snapshot_policy_details
            the cloud provider name -- str
        :param local_snapshot_policy_secure: secure snapshots may only be
            terminated after they expire or by Dell EMC support -- bool
        :param local_snapshot_policy_snapshot_count: the max snapshot count
            of the policy -- int
        :param offset_mins: Defines when, within the interval the snapshots
            will be taken for a specified Snapshot Policy. The offset
            must be less than the interval of the Snapshot Policy. For
            daily snapshots the offset is the number of minutes after
            midnight UTC, for weekly the offset is from midnight UTC on
            the Sunday. The format must be in minutes -- int
        :param compliance_count_warning: The Number of snapshots which are
            not failed or bad when compliance changes to warning. -- int
        :param compliance_count_critical: The Number of snapshots which are
            not failed or bad when compliance changes to critical. -- int
        :param _async: is the operation asynchronous -- bool
        :returns: resource object -- dict
        :raises InvalidInputException: on missing or invalid arguments
        """
        payload = dict()
        if snapshot_policy_name:
            payload.update({'snapshot_policy_name': snapshot_policy_name})
        else:
            msg = 'Snapshot policy name cannot be None.'
            LOG.exception(msg)
            raise exception.InvalidInputException(data=msg)
        # Exactly one of the cloud or local policy variants must be built.
        if cloud_provider_name:
            if not cloud_retention_days:
                msg = ('If cloud_provider_name is set, cloud_retention_days '
                       'cannot be None.')
                LOG.exception(msg)
                raise exception.InvalidInputException(data=msg)
            cloud_snapshot_policy_details = {
                'cloud_retention_days': cloud_retention_days,
                'cloud_provider_name': cloud_provider_name
            }
            payload.update({
                'cloud_snapshot_policy_details': cloud_snapshot_policy_details
            })
        elif local_snapshot_policy_snapshot_count:
            local_snapshot_policy_details = {
                'snapshot_count': local_snapshot_policy_snapshot_count
            }
            if local_snapshot_policy_secure:
                local_snapshot_policy_details.update(
                    {'secure': local_snapshot_policy_secure})
                LOG.warning('The secure snap option cannot be enabled or '
                            'disabled on an existing policy. Secure '
                            'snapshots may only be terminated after '
                            'they expire or by customer-authorized '
                            'Dell EMC support.')
            payload.update({
                'local_snapshot_policy_details': local_snapshot_policy_details
            })
        else:
            msg = ('One of cloud snapshot policy or local snapshot policy '
                   'must be chosen. Check that you have the minimum '
                   'parameters set.')
            LOG.exception(msg)
            raise exception.InvalidInputException(data=msg)
        msg = ('The interval supplied must be one of \'10 Minutes\', '
               '\'12 Minutes\', \'15 Minutes\' etc.')
        if interval:
            # Case-insensitive match against the canonical interval list;
            # the canonical spelling is what goes into the payload.
            try:
                index = [
                    x.lower() for x in (constants.SNAPSHOT_POLICY_INTERVALS)
                ].index(interval.lower())
            except ValueError as error:
                LOG.exception(msg)
                raise exception.InvalidInputException(data=msg) from error
            payload.update(
                {'interval': constants.SNAPSHOT_POLICY_INTERVALS[index]})
        else:
            message = 'interval cannot be None. {}'.format(msg)
            LOG.exception(message)
            raise exception.InvalidInputException(data=message)
        if offset_mins:
            payload.update({'offset_mins': offset_mins})
        if compliance_count_warning:
            payload.update(
                {'compliance_count_warning': compliance_count_warning})
        if compliance_count_critical:
            payload.update(
                {'compliance_count_critical': compliance_count_critical})
        if _async:
            payload.update(constants.ASYNC_UPDATE)
        return self.create_resource(category=REPLICATION,
                                    resource_level=SYMMETRIX,
                                    resource_level_id=self.array_id,
                                    resource_type=SNAPSHOT_POLICY,
                                    payload=payload)

    def associate_to_storage_groups(self, snapshot_policy_name,
                                    storage_group_names, _async=False):
        """Associate a snapshot policy to storage group(s).

        :param snapshot_policy_name: the snapshot policy name -- str
        :param storage_group_names: List of storage group names -- list
        :param _async: is the operation asynchronous -- bool
        :returns: resource object -- dict
        """
        return self.modify_snapshot_policy(
            snapshot_policy_name, constants.ASSOCIATE_TO_STORAGE_GROUPS,
            storage_group_names=storage_group_names, _async=_async)

    def disassociate_from_storage_groups(self, snapshot_policy_name,
                                         storage_group_names, _async=False):
        """Disassociate a snapshot policy from storage group(s).

        :param snapshot_policy_name: the snapshot policy name -- str
        :param storage_group_names: List of storage group names -- list
        :param _async: is the operation asynchronous -- bool
        :returns: resource object -- dict
        """
        return self.modify_snapshot_policy(
            snapshot_policy_name,
            constants.DISASSOCIATE_FROM_STORAGE_GROUPS,
            storage_group_names=storage_group_names, _async=_async)

    def suspend_snapshot_policy(self, snapshot_policy_name, _async=False):
        """Suspend a snapshot policy.

        :param snapshot_policy_name: the snapshot policy name -- str
        :param _async: is the operation asynchronous -- bool
        :returns: resource object -- dict
        """
        return self.modify_snapshot_policy(snapshot_policy_name,
                                           constants.SUSPEND_POLICY,
                                           _async=_async)

    def resume_snapshot_policy(self, snapshot_policy_name, _async=False):
        """Resume a suspended snapshot policy.

        :param snapshot_policy_name: the snapshot policy name -- str
        :param _async: is the operation asynchronous -- bool
        :returns: resource object -- dict
        """
        return self.modify_snapshot_policy(snapshot_policy_name,
                                           constants.RESUME_POLICY,
                                           _async=_async)

    def modify_snapshot_policy_properties(
            self, snapshot_policy_name, interval=None, offset_mins=None,
            snapshot_count=None, compliance_count_warning=None,
            compliance_count_critical=None, new_snapshot_policy_name=None,
            _async=False):
        """Modify the properties of a snapshot policy.

        :param snapshot_policy_name: the snapshot policy name -- str
        :param interval: The value of the interval counter for snapshot
            policy execution. Must be one of '10 Minutes', '12 Minutes',
            '15 Minutes', '20 Minutes', '30 Minutes', '1 Hour',
            '2 Hours', '3 Hours', '4 Hours', '6 Hours', '8 Hours',
            '12 Hours', '1 Day', '7 Days' -- str
        :param offset_mins: The number of minutes after 00:00 on Monday
            to first run the service policy. The offset must be less
            than the interval of the Snapshot Policy. The format must
            be in minutes -- int
        :param snapshot_count: The maximum number of snapshots that should
            be maintained for a specified Snapshot Policy. The maximum
            count must be between 1 to 1024. -- int
        :param compliance_count_warning: The Number of snapshots which are
            not failed or bad when compliance changes to warning. The
            warning compliance count cannot be set to 0 and must be less
            than or equal to the maximum count of the Snapshot
            Policy. -- int
        :param compliance_count_critical: The Number of snapshots which
            are not failed or bad when compliance changes to critical.
            If the warning compliance count is also set, the critical
            compliance count must be less than or equal to that. -- int
        :param new_snapshot_policy_name: change the name if set -- str
        :param _async: is the operation asynchronous -- bool
        :returns: resource object -- dict
        """
        return self.modify_snapshot_policy(
            snapshot_policy_name, constants.MODIFY_POLICY,
            interval=interval, offset_mins=offset_mins,
            snapshot_count=snapshot_count,
            compliance_count_warning=compliance_count_warning,
            compliance_count_critical=compliance_count_critical,
            new_snapshot_policy_name=new_snapshot_policy_name,
            _async=_async)

    def modify_snapshot_policy(
            self, snapshot_policy_name, action, interval=None,
            offset_mins=None, snapshot_count=None,
            compliance_count_warning=None, compliance_count_critical=None,
            storage_group_names=None, new_snapshot_policy_name=None,
            _async=False):
        """Modify a snapshot policy.

        This can be action: [Modify, Suspend, Resume,
        AssociateToStorageGroups, DisassociateFromStorageGroups].
        A modify of the snapshot policy or adding or removing storage
        groups associated with the policy.

        :param snapshot_policy_name: the snapshot policy name -- str
        :param action: the modification action, must be one of
            'AssociateToStorageGroups', 'DisassociateFromStorageGroups'
            'Modify', 'Suspend', 'Resume' -- str
        :param interval: The value of the interval counter for snapshot
            policy execution. Must be one of '10 Minutes', '12 Minutes',
            '15 Minutes', '20 Minutes', '30 Minutes', '1 Hour',
            '2 Hours', '3 Hours', '4 Hours', '6 Hours', '8 Hours',
            '12 Hours', '1 Day', '7 Days' -- str
        :param offset_mins: The number of minutes after 00:00 on Monday
            to first run the service policy. The offset must be less
            than the interval of the Snapshot Policy. The format must
            be in minutes -- int
        :param snapshot_count: The maximum number of snapshots that should
            be maintained for a specified Snapshot Policy. The maximum
            count must be between 1 to 1024. -- int
        :param compliance_count_warning: The Number of snapshots which are
            not failed or bad when compliance changes to warning. The
            warning compliance count cannot be set to 0 and must be less
            than or equal to the maximum count of the Snapshot
            Policy. -- int
        :param compliance_count_critical: The Number of snapshots which
            are not failed or bad when compliance changes to critical.
            If the warning compliance count is also set, the critical
            compliance count must be less than or equal to that. -- int
        :param storage_group_names: List of storage group names -- list
        :param new_snapshot_policy_name: change the name if set -- str
        :param _async: is the operation asynchronous -- bool
        :returns: resource object -- dict
        :raises InvalidInputException: on missing or invalid arguments
        """
        payload = dict()
        if not snapshot_policy_name:
            msg = 'Snapshot policy name cannot be None.'
            LOG.exception(msg)
            raise exception.InvalidInputException(data=msg)
        msg = ('The action supplied must be one of \'Modify\', '
               '\'Suspend\', \'Resume\', \'AssociateToStorageGroups\', '
               '\'DisassociateFromStorageGroups\'.')
        if action:
            # Case-insensitive match against the canonical action list.
            try:
                index = [
                    x.lower() for x in (constants.SNAPSHOT_POLICY_ACTIONS)
                ].index(action.lower())
            except ValueError as error:
                LOG.exception(msg)
                raise exception.InvalidInputException(data=msg) from error
        else:
            message = 'The action cannot be None. {}'.format(msg)
            LOG.exception(message)
            raise exception.InvalidInputException(data=message)
        payload.update({'action': constants.SNAPSHOT_POLICY_ACTIONS[index]})
        if action.lower() == constants.ASSOCIATE_TO_STORAGE_GROUPS.lower():
            LOG.info('Associating storage groups to {spn}.'.format(
                spn=snapshot_policy_name))
            if not storage_group_names:
                msg = 'storage_group_names cannot be None.'
                LOG.exception(msg)
                raise exception.InvalidInputException(data=msg)
            associate_to_storage_group_param = dict()
            storage_group_name_param = {
                'storage_group_name': storage_group_names
            }
            associate_to_storage_group_param.update(
                {'associate_to_storage_group': storage_group_name_param})
            payload.update(associate_to_storage_group_param)
        elif action.lower() == (
                constants.DISASSOCIATE_FROM_STORAGE_GROUPS.lower()):
            LOG.info('Disassociating storage groups from {spn}.'.format(
                spn=snapshot_policy_name))
            if not storage_group_names:
                msg = 'storage_group_names cannot be None.'
                LOG.exception(msg)
                raise exception.InvalidInputException(data=msg)
            disassociate_from_storage_group_param = dict()
            storage_group_name_param = {
                'storage_group_name': storage_group_names
            }
            disassociate_from_storage_group_param.update(
                {'disassociate_from_storage_group': storage_group_name_param})
            payload.update(disassociate_from_storage_group_param)
        elif action.lower() == constants.MODIFY_POLICY.lower():
            LOG.info('Modifying {spn}.'.format(spn=snapshot_policy_name))
            # Only the supplied properties are sent in the modify payload.
            modify_param = dict()
            if new_snapshot_policy_name:
                modify_param.update(
                    {'snapshot_policy_name': new_snapshot_policy_name})
            if interval:
                modify_param.update(
                    {'interval_mins': policy_interval_enum.get(interval)})
            if offset_mins:
                modify_param.update({'offset_mins': offset_mins})
            if snapshot_count:
                modify_param.update({'snapshot_count': snapshot_count})
            if compliance_count_warning:
                modify_param.update(
                    {'compliance_count_warning': compliance_count_warning})
            if compliance_count_critical:
                modify_param.update(
                    {'compliance_count_critical': compliance_count_critical})
            if modify_param:
                payload.update({'modify': modify_param})
            else:
                msg = 'No modify payload received.'
                LOG.exception(msg)
                raise exception.InvalidInputException(data=msg)
        elif action.lower() == constants.SUSPEND_POLICY.lower():
            LOG.info('Suspending {spn}.'.format(spn=snapshot_policy_name))
        elif action.lower() == constants.RESUME_POLICY.lower():
            LOG.info('Resuming {spn}.'.format(spn=snapshot_policy_name))
        if _async:
            payload.update(constants.ASYNC_UPDATE)
        return self.modify_resource(category=REPLICATION,
                                    resource_level=SYMMETRIX,
                                    resource_level_id=self.array_id,
                                    resource_type=SNAPSHOT_POLICY,
                                    resource_type_id=snapshot_policy_name,
                                    payload=payload)

    def delete_snapshot_policy(self, snapshot_policy_name):
        """Delete a snapshot policy.

        :param snapshot_policy_name: the snapshot policy name -- str
        :raises InvalidInputException: if the name is None
        """
        if not snapshot_policy_name:
            msg = 'Snapshot policy name cannot be None.'
            LOG.exception(msg)
            raise exception.InvalidInputException(data=msg)
        self.delete_resource(category=REPLICATION,
                             resource_level=SYMMETRIX,
                             resource_level_id=self.array_id,
                             resource_type=SNAPSHOT_POLICY,
                             resource_type_id=snapshot_policy_name)

    def get_snapshot_policy_compliance(
            self, storage_group_name, last_week=False, last_four_weeks=False,
            from_epoch=None, to_epoch=None, from_time_string=None,
            to_time_string=None):
        """Get compliance attributes on a storage group.

        :param storage_group_name: storage group name
        :param last_week: compliance in last week -- bool
        :param last_four_weeks: compliance in last four weeks -- bool
        :param from_epoch: timestamp since epoch -- str
            e.g 1606820929 (seconds)
        :param to_epoch: timestamp since epoch -- str
            e.g 1606820929 (seconds)
        :param from_time_string: human readable date -- str
            e.g 2020-12-01 15:00
        :param to_time_string: human readable date -- str
            e.g 2020-12-01 15:00
        :returns: resource -- dict
        :raises InvalidInputException: on missing or invalid arguments
        """
        if not storage_group_name:
            msg = 'Storage group name cannot be None.'
            LOG.exception(msg)
            raise exception.InvalidInputException(data=msg)
        msg, query_params = self.verify_input_params(last_week,
                                                     last_four_weeks,
                                                     from_epoch, to_epoch,
                                                     from_time_string,
                                                     to_time_string)
        if msg:
            LOG.exception(msg)
            raise exception.InvalidInputException(data=msg)
        return self.get_resource(category=REPLICATION,
                                 resource_level=SYMMETRIX,
                                 resource_level_id=self.array_id,
                                 resource_type=STORAGEGROUP,
                                 resource_type_id=storage_group_name,
                                 resource=COMPLIANCE,
                                 object_type=SNAPSHOT,
                                 params=query_params)

    def get_snapshot_policy_compliance_last_week(self, storage_group_name):
        """Get compliance attributes on a storage group for the last week.

        :param storage_group_name: storage group name
        :returns: resource -- dict
        """
        return self.get_snapshot_policy_compliance(storage_group_name,
                                                   last_week=True)

    def get_snapshot_policy_compliance_last_four_weeks(
            self, storage_group_name):
        """Get compliance attributes for the last four weeks.

        Get compliance attributes on a storage group for the last
        four weeks

        :param storage_group_name: storage group name
        :returns: resource -- dict
        """
        return self.get_snapshot_policy_compliance(storage_group_name,
                                                   last_four_weeks=True)

    def get_snapshot_policy_compliance_epoch(
            self, storage_group_name, from_epoch=None, to_epoch=None):
        """Get compliance attributes between two epoch timestamps.

        Get compliance attributes on a storage group between the two
        supplied epoch timestamps.

        :param storage_group_name: storage group name
        :param from_epoch: timestamp since epoch -- str
            e.g 1606820929 (seconds)
        :param to_epoch: timestamp since epoch -- str
            e.g 1606820929 (seconds)
        :returns: resource -- dict
        """
        return self.get_snapshot_policy_compliance(storage_group_name,
                                                   from_epoch=from_epoch,
                                                   to_epoch=to_epoch)

    def get_snapshot_policy_compliance_human_readable_time(
            self, storage_group_name, from_time_string=None,
            to_time_string=None):
        """Get compliance attributes between two human readable times.

        Get compliance attributes on a storage group between the two
        supplied human readable timestamps.

        :param storage_group_name: storage group name
        :param from_time_string: human readable date -- str
            e.g 2020-12-01 15:00
        :param to_time_string: human readable date -- str
            e.g 2020-12-01 15:00
        :returns: resource -- dict
        """
        return self.get_snapshot_policy_compliance(
            storage_group_name, from_time_string=from_time_string,
            to_time_string=to_time_string)

    def verify_input_params(self, last_week, last_four_weeks, from_epoch,
                            to_epoch, from_time_string, to_time_string):
        """Verify the input parameters for compliance.

        :param last_week: compliance in last week -- bool
        :param last_four_weeks: compliance in last four weeks -- bool
        :param from_epoch: timestamp since epoch -- str
            e.g 1606820929 (seconds)
        :param to_epoch: timestamp since epoch -- str
            e.g 1606820929 (seconds)
        :param from_time_string: human readable date -- str
            e.g 2020-12-01 15:00
        :param to_time_string: human readable date -- str
            e.g 2020-12-01 15:00
        :returns: msg or None -- str, query_params -- dict
        """
        msg = self.verify_combination(last_week, last_four_weeks, from_epoch,
                                      from_time_string)
        if msg:
            return msg, None
        msg, query_params = self.verify_from_epoch(from_epoch, to_epoch,
                                                   to_time_string)
        if msg:
            return msg, None
        if to_epoch:
            if not from_epoch and not from_time_string:
                return ('to_epoch must be accompanied with one of from_epoch '
                        'or from_time_string.', None)
            if to_time_string:
                return ('to_epoch and to_time_string should not both '
                        'be supplied as they are different formats of the '
                        'same thing.', None)
        # NOTE(review): this call rebinds query_params unconditionally;
        # when only epoch parameters were supplied, verify_from_time_string
        # returns an empty dict and appears to discard the params built by
        # verify_from_epoch above — verify intended behavior with callers.
        msg, query_params = self.verify_from_time_string(
            to_epoch, to_time_string, from_time_string)
        if msg:
            return msg, None
        if to_time_string:
            if not from_time_string and not from_epoch:
                return ('to_time_string must be accompanied with one of '
                        'from_time_string or to_epoch.', None)
        return None, query_params

    @staticmethod
    def verify_combination(last_week, last_four_weeks, from_epoch,
                           from_time_string):
        """Verify the valid combinations for compliance.

        At most one of the mutually-exclusive window selectors may be
        supplied.

        :param last_week: compliance in last week -- bool
        :param last_four_weeks: compliance in last four weeks -- bool
        :param from_epoch: timestamp since epoch -- str
            e.g 1606820929 (seconds)
        :param from_time_string: human readable date -- str
            e.g 2020-12-01 15:00
        :returns: msg or None -- str
        """
        input_params_list = ([
            last_week, last_four_weeks, from_epoch, from_time_string
        ])
        if len([i for i in input_params_list if i]) > 1:
            return ('Only one of last_week, last_four_weeks, from_epoch, '
                    'from_time_string can be true or not None.')
        return None

    def verify_from_epoch(self, from_epoch, to_epoch, to_time_string):
        """Verify the from_epoch param for compliance.

        :param from_epoch: timestamp since epoch -- str
            e.g 1606820929 (seconds)
        :param to_epoch: timestamp since epoch -- str
            e.g 1606820929 (seconds)
        :param to_time_string: human readable date -- str
            e.g 2020-12-01 15:00
        :returns: msg or None -- str, query_params -- dict
        """
        query_params = dict()
        if from_epoch:
            if self.common.check_epoch_timestamp(from_epoch):
                if not to_epoch and not to_time_string:
                    return ('from_epoch must be accompanied with one of '
                            'to_epoch or to_time_string.', None)
                if to_epoch:
                    if self.common.check_epoch_timestamp(to_epoch):
                        query_params['from_epoch'] = from_epoch
                        query_params['to_epoch'] = to_epoch
                    else:
                        return ('to_epoch {} is in the wrong format.'.format(
                            to_epoch), None)
                elif to_time_string:
                    if self.common.check_timestamp(to_time_string):
                        query_params['from_epoch'] = from_epoch
                        query_params['toTimeString'] = to_time_string
                    else:
                        return ('to_time_string {} is in the wrong format.'.
                                format(to_time_string), None)
            else:
                return (
                    'from_epoch {} is in the wrong format.'.format(from_epoch),
                    None)
        return None, query_params

    def verify_from_time_string(self, to_epoch, to_time_string,
                                from_time_string):
        """Verify the from_time_string param for compliance.

        :param to_epoch: timestamp since epoch -- str
            e.g 1606820929 (seconds)
        :param from_time_string: human readable date -- str
            e.g 2020-12-01 15:00
        :param to_time_string: human readable date -- str
            e.g 2020-12-01 15:00
        :returns: msg or None -- str, query_params -- dict
        """
        query_params = dict()
        if from_time_string:
            if self.common.check_timestamp(from_time_string):
                if not to_time_string and not to_epoch:
                    return ('from_time_string must be accompanied with one of '
                            'to_time_string or to_epoch.', None)
                if to_time_string:
                    if self.common.check_timestamp(to_time_string):
                        query_params['fromTimeString'] = from_time_string
                        query_params['toTimeString'] = to_time_string
                    else:
                        return ('to_time_string {} is in the wrong format.'.
                                format(to_time_string), None)
                elif to_epoch:
                    query_params['fromTimeString'] = from_time_string
                    query_params['to_epoch'] = to_epoch
            else:
                return ('from_time_string {} is in the wrong format.'.format(
                    from_time_string), None)
        return None, query_params
class SystemFunctions(object):
    """Access to Unisphere system level REST endpoints.

    NOTE(review): a second, larger ``SystemFunctions`` definition appears
    later in this file and will shadow this one at import time -- confirm
    whether this earlier copy is dead code.
    """

    def __init__(self, array_id, rest_client):
        """__init__."""
        self.array_id = array_id
        self.common = CommonFunctions(rest_client)

    def get_system_health(self, array_id=None):
        """Query for system health information.

        :param array_id: array id -- str
        :returns: system health -- dict
        """
        return self.common.get_resource(
            category=SYSTEM, resource_level=SYMMETRIX,
            resource_level_id=array_id or self.array_id,
            object_type=HEALTH)

    def list_system_health_check(self, array_id=None):
        """List previously run system health checks.

        :param array_id: array id -- str
        :returns: system health checks -- list
        """
        return self.common.get_resource(
            category=SYSTEM, resource_level=SYMMETRIX,
            resource_level_id=array_id or self.array_id,
            object_type=HEALTH, object_type_id=HEALTH_CHECK)

    def get_health_check_details(self, health_check_id, array_id=None):
        """Gets details of individual health check.

        :param health_check_id: health check id -- str
        :param array_id: array id -- str
        :returns: health check details -- dict
        """
        return self.common.get_resource(
            category=SYSTEM, resource_level=SYMMETRIX,
            resource_level_id=array_id or self.array_id,
            resource_type=HEALTH, resource_type_id=HEALTH_CHECK,
            object_type=health_check_id)

    def perform_health_check(self, array_id=None, description=None):
        """Initiate an environmental health check.

        :param array_id: array id -- str
        :param description: description for health check, if not set this
            will default to 'PyU4V-array_id-date-time'
        :returns: health check property details -- dict
        """
        array_id = array_id or self.array_id
        if not description:
            # Default description embeds the array id and the current
            # date/time so repeated checks remain distinguishable.
            timestamp = datetime.now()
            description = 'PyU4V-{arr}-{date}-{time}'.format(
                arr=array_id, date=timestamp.strftime('%d%m%Y'),
                time=timestamp.strftime('%H%M%S'))
        return self.common.create_resource(
            category=SYSTEM, resource_level=SYMMETRIX,
            resource_level_id=array_id, object_type=HEALTH,
            object_type_id=HEALTH_CHECK,
            payload={DESCRIPTION: description})

    def delete_health_check(self, health_check_id, array_id=None):
        """Delete a health check record.

        :param health_check_id: health check id -- str
        :param array_id: array id -- str
        """
        self.common.delete_resource(
            category=SYSTEM, resource_level=SYMMETRIX,
            resource_level_id=array_id or self.array_id,
            resource_type=HEALTH, resource_type_id=HEALTH_CHECK,
            object_type=health_check_id)

    def get_disk_id_list(self, array_id=None, failed=False):
        """Get a list of disks ids installed.

        :param array_id: array id -- str
        :param failed: if only failed disks should be returned -- bool
        :returns: disk ids -- list
        """
        return self.common.get_resource(
            category=SYSTEM, resource_level=SYMMETRIX,
            resource_level_id=array_id or self.array_id,
            resource_type=DISK, params={FAILED: failed})

    def get_disk_details(self, disk_id, array_id=None):
        """Get details for specified disk id.

        :param disk_id: disk id -- str
        :param array_id: array id -- str
        :returns: disk details -- dict
        """
        return self.common.get_resource(
            category=SYSTEM, resource_level=SYMMETRIX,
            resource_level_id=array_id or self.array_id,
            resource_type=DISK, resource_type_id=disk_id)

    def get_tags(self, array_id=None, tag_name=None, storage_group_id=None,
                 num_of_storage_groups=None, num_of_arrays=None):
        """Query for a list of tag names.

        Each optional input acts as a filter; supplying one narrows the
        returned tag list accordingly.

        :param array_id: filter by array id -- str
        :param tag_name: filter by tag name -- str
        :param storage_group_id: filter by storage group id -- str
        :param num_of_storage_groups: filter by tags that are in x or
            greater amount of storage groups -- int
        :param num_of_arrays: filter by tags that in y or greater amount
            of arrays -- int
        :returns: tags -- list
        """
        candidates = (
            (ARRAY_ID, array_id),
            (TAG_NAME, tag_name),
            (SG_ID, storage_group_id))
        query = {key: value for key, value in candidates if value}
        # Numeric filters are passed to the REST layer as strings.
        if num_of_storage_groups:
            query[SG_NUM] = str(num_of_storage_groups)
        if num_of_arrays:
            query[ARRAY_NUM] = str(num_of_arrays)
        return self.common.get_resource(
            category=SYSTEM, resource_level=TAG, params=query)

    def get_tagged_objects(self, tag_name):
        """Get a list of objects with specified tag.

        :param tag_name: tag name -- str
        :returns: tags -- list
        """
        return self.common.get_resource(
            category=SYSTEM, resource_level=TAG,
            resource_level_id=tag_name)
class SystemFunctions(object):
    """SystemFunctions.

    Interface to Unisphere system level REST resources: system health,
    disks, tags, alerts, settings import/export and audit logs.
    """

    def __init__(self, array_id, rest_client):
        """__init__."""
        self.common = CommonFunctions(rest_client)
        self.array_id = array_id

    def get_system_health(self, array_id=None):
        """Query for system health information.

        :param array_id: array id -- str
        :returns: system health -- dict
        """
        array_id = self.array_id if not array_id else array_id
        return self.common.get_resource(
            category=SYSTEM, resource_level=SYMMETRIX,
            resource_level_id=array_id, object_type=HEALTH)

    def list_system_health_check(self, array_id=None):
        """List previously run system health checks.

        :param array_id: array id -- str
        :returns: system health checks -- list
        """
        array_id = self.array_id if not array_id else array_id
        return self.common.get_resource(
            category=SYSTEM, resource_level=SYMMETRIX,
            resource_level_id=array_id, object_type=HEALTH,
            object_type_id=HEALTH_CHECK)

    def get_health_check_details(self, health_check_id, array_id=None):
        """Gets details of individual health check.

        :param health_check_id: health check id -- str
        :param array_id: array id -- str
        :returns: health check details -- dict
        """
        array_id = self.array_id if not array_id else array_id
        return self.common.get_resource(
            category=SYSTEM, resource_level=SYMMETRIX,
            resource_level_id=array_id, resource_type=HEALTH,
            resource_type_id=HEALTH_CHECK, object_type=health_check_id)

    def perform_health_check(self, array_id=None, description=None):
        """Initiate an environmental health check.

        :param array_id: array id -- str
        :param description: description for health check, if not set this
            will default to 'PyU4V-array_id-date-time' -- str
        :returns: health check property details -- dict
        """
        array_id = self.array_id if not array_id else array_id
        now = datetime.now()
        date_now, time_now = now.strftime('%d%m%Y'), now.strftime('%H%M%S')
        if not description:
            # Default description embeds array id + timestamp so repeated
            # checks remain distinguishable.
            description = 'PyU4V-{arr}-{date}-{time}'.format(
                arr=array_id, date=date_now, time=time_now)
        return self.common.create_resource(
            category=SYSTEM, resource_level=SYMMETRIX,
            resource_level_id=array_id, object_type=HEALTH,
            object_type_id=HEALTH_CHECK,
            payload={DESCRIPTION: description})

    def delete_health_check(self, health_check_id, array_id=None):
        """Delete a health check record.

        :param health_check_id: health check id -- str
        :param array_id: array id -- str
        """
        array_id = self.array_id if not array_id else array_id
        self.common.delete_resource(
            category=SYSTEM, resource_level=SYMMETRIX,
            resource_level_id=array_id, resource_type=HEALTH,
            resource_type_id=HEALTH_CHECK, object_type=health_check_id)

    def get_disk_id_list(self, array_id=None, failed=False):
        """Get a list of disks ids installed.

        :param array_id: array id -- str
        :param failed: if only failed disks should be returned -- bool
        :returns: disk ids -- list
        """
        array_id = self.array_id if not array_id else array_id
        return self.common.get_resource(
            category=SYSTEM, resource_level=SYMMETRIX,
            resource_level_id=array_id, resource_type=DISK,
            params={FAILED: failed})

    def get_disk_details(self, disk_id, array_id=None):
        """Get details for specified disk id.

        :param disk_id: disk id -- str
        :param array_id: array id -- str
        :returns: disk details -- dict
        """
        array_id = self.array_id if not array_id else array_id
        return self.common.get_resource(
            category=SYSTEM, resource_level=SYMMETRIX,
            resource_level_id=array_id, resource_type=DISK,
            resource_type_id=disk_id)

    def get_tags(self, array_id=None, tag_name=None, storage_group_id=None,
                 num_of_storage_groups=None, num_of_arrays=None):
        """Query for a list of tag names.

        The input parameters represent optional filters for the tag query,
        including any filters will apply that filter to the list of
        returned tags.

        :param array_id: filter by array id -- str
        :param tag_name: filter by tag name -- str
        :param storage_group_id: filter by storage group id -- str
        :param num_of_storage_groups: filter by tags that are in x or
            greater amount of storage groups -- int
        :param num_of_arrays: filter by tags that in y or greater amount
            of arrays -- int
        :returns: tags -- list
        """
        filters = dict()
        if array_id:
            filters[ARRAY_ID] = array_id
        if tag_name:
            filters[TAG_NAME] = tag_name
        if storage_group_id:
            filters[SG_ID] = storage_group_id
        # Numeric filters are passed to the REST layer as strings.
        if num_of_storage_groups:
            filters[SG_NUM] = str(num_of_storage_groups)
        if num_of_arrays:
            filters[ARRAY_NUM] = str(num_of_arrays)
        return self.common.get_resource(
            category=SYSTEM, resource_level=TAG, params=filters)

    def get_tagged_objects(self, tag_name):
        """Get a list of objects with specified tag.

        :param tag_name: tag name -- str
        :returns: tags -- list
        """
        return self.common.get_resource(
            category=SYSTEM, resource_level=TAG,
            resource_level_id=tag_name)

    def get_alert_summary(self):
        """Gets Alert Summary information.

        :returns: summary of alerts on system - dict
        """
        return self.common.get_resource(
            category=SYSTEM, resource_level=ALERT_SUMMARY)

    def get_alert_ids(self, array=None, _type=None, severity=None,
                      state=None, created_date=None, _object=None,
                      object_type=None, acknowledged=False,
                      description=None):
        """Get a list of Alert Ids.

        Parameters for this function can be combined to create a search
        pattern based on multiple filters to target results.

        :param array: filter on associated array id e.g. "000213234443"
            or "<like>443" -- str
        :param _type: filter on alert type: ARRAY, PERFORMANCE,
            SERVER -- str
        :param severity: filter on severity: NORMAL, INFORMATION, MINOR,
            WARNING, CRITICAL, FATAL -- str
        :param state: filter on state: NEW, ACKNOWLEDGED, CLEARED -- str
        :param created_date: filter on creation date, greater than (">1"),
            less than ("<1") or equal to the specified
            "MMM-dd-yyyy HH:mm:ss.SSS" value -- str
        :param _object: filter on associated array object e.g. equal to
            "object=SRP_3" -- str
        :param object_type: filter on associated array object type
            e.g. equal to "object_type=Director" -- str
        :param acknowledged: filter on acknowledged or not -- bool
        :param description: filter on text matching description in
            body -- str
        :returns: list of alert ids -- list
        """
        filters = dict()
        filters['acknowledged'] = acknowledged
        if array:
            filters['array'] = array
        if _type:
            filters['type'] = _type
        if severity:
            filters['severity'] = severity
        if state:
            filters['state'] = state
        if created_date:
            filters['created_date'] = created_date
        if _object:
            # Fix: the REST filter key is 'object' (see docstring example
            # "object=SRP_3"); the leading underscore on the parameter only
            # avoids shadowing the Python builtin, mirroring _type -> 'type'.
            filters['object'] = _object
        if object_type:
            filters['object_type'] = object_type
        if description:
            filters['description'] = description
        response = self.common.get_resource(
            category=SYSTEM, resource_level=ALERT, params=filters)
        return response.get('alertId') if response else list()

    def get_alert_details(self, alert_id):
        """Gets the details of an alert.

        :param alert_id: alert_id uniquely identifying alert on Unisphere
            system -- str
        :returns: alert details -- dict
        """
        return self.common.get_resource(
            category=SYSTEM, resource_level=ALERT,
            resource_level_id=alert_id)

    def acknowledge_alert(self, alert_id):
        """Acknowledges an alert.

        :param alert_id: alert_id uniquely identifying alert on Unisphere
            system -- str
        :returns: alert details -- dict
        """
        payload = {'editAlertActionParam': 'ACKNOWLEDGE'}
        return self.common.modify_resource(
            category=SYSTEM, resource_level=ALERT,
            resource_level_id=alert_id, payload=payload)

    def delete_alert(self, alert_id):
        """Deletes Specified Alert.

        :param alert_id: alert_id uniquely identifying alert on Unisphere
            system -- str
        """
        return self.common.delete_resource(
            category=SYSTEM, resource_level=ALERT,
            resource_level_id=alert_id)

    def _download_settings(self, request_body, file_name=None, dir_path=None,
                           return_binary=False):
        """Download settings helper method.

        :param request_body: payload request body -- dict
        :param file_name: zip file name -- str
        :param dir_path: file save location -- str
        :param return_binary: return settings binary data -- bool
        :returns: export details -- dict
        """
        date_time = datetime.fromtimestamp(time.time())
        if not file_name:
            file_name = '{fn}-{d}.{e}'.format(
                fn=SETTINGS_FILENAME_TEMPLATE,
                d=date_time.strftime(STR_TIME_FORMAT), e=ZIP_SUFFIX)
        response = self.common.download_file(
            category=SYSTEM, resource_level=SETTINGS,
            resource_type=EXPORT_FILE, payload=request_body)
        # 'success' stays False when the download request returned nothing.
        return_dict = {'success': False}
        if response:
            # Return binary data, do not write to file
            if return_binary:
                return_dict['binary_data'] = response.content
            # Write to file
            else:
                file_path = file_handler.write_binary_data_to_file(
                    data=response, file_extension=ZIP_SUFFIX,
                    file_name=file_name, dir_path=dir_path)
                return_dict['settings_path'] = file_path
            return_dict['success'] = True
            return_dict['settings_time'] = date_time
            LOG.info('The settings download request was successful.')
        return return_dict

    def download_all_settings(self, file_password, dir_path=None,
                              file_name=None, array_id=None,
                              return_binary=False):
        """Download all settings.

        Export settings feature allows the saving of a subset of system
        settings to a file. The exported settings have a generic format and
        do not contain any specific information regarding particular
        storage array or Unisphere instance, thus making it applicable in
        any environment. The intention is to help users to port the system
        wide settings to another instance of Unisphere, and also to capture
        single array settings so that they can be applied to another
        storage array within single instance or another instance of
        Unisphere at any point of time.

        - All settings:
            - Unisphere settings: alert notifications, performance metrics,
              performance preferences, performance user templates
            - System settings: alert policies, alert level notifications,
              performance thresholds, system thresholds

        By default settings will be written to a zip file in the current
        working directory unless a supplied directory path and/or file name
        are provided. If any extension is provided in the file name it will
        be replaced with .zip before the data is written to file.

        If instead the file writing process should be handled outside of
        PyU4V or uploaded directly to Unisphere without any file handling
        set return_binary to True. The response dict will have the settings
        binary data included in key 'binary_data'.

        :param file_password: password to sign file (required) -- str
        :param dir_path: file save location -- str
        :param file_name: zip file name -- str
        :param array_id: array id -- str
        :param return_binary: return settings binary data -- bool
        :returns: download details -- dict
        """
        array_id = self.array_id if not array_id else array_id
        request_body = {FILE_PASSWORD: file_password, SRC_ARRAY: array_id}
        return self._download_settings(
            request_body=request_body, dir_path=dir_path,
            file_name=file_name, return_binary=return_binary)

    def download_unisphere_settings(
            self, file_password, dir_path=None, file_name=None,
            return_binary=False, exclude_alert_notification_settings=False,
            exclude_performance_preference_settings=False,
            exclude_performance_user_templates=False,
            exclude_performance_metric_settings=False):
        """Download Unisphere settings.

        - Unisphere settings: alert notifications, performance metrics,
          performance preferences, performance user templates

        By default settings will be written to a zip file in the current
        working directory unless a supplied directory path and/or file name
        are provided. If any extension is provided in the file name it will
        be replaced with .zip before the data is written to file.

        If instead the file writing process should be handled outside of
        PyU4V or uploaded directly to Unisphere without any file handling
        set return_binary to True. The response dict will have the settings
        binary data included in key 'binary_data'.

        :param file_password: password to sign file (required) -- str
        :param dir_path: file save location -- str
        :param file_name: zip file name -- str
        :param return_binary: return settings binary data -- bool
        :param exclude_alert_notification_settings: exclude alert
            notification settings -- bool
        :param exclude_performance_preference_settings: exclude performance
            preference settings -- bool
        :param exclude_performance_user_templates: exclude performance user
            templates -- bool
        :param exclude_performance_metric_settings: exclude performance
            metric settings -- bool
        :returns: download details -- dict
        :raises InvalidInputException: if every settings category is
            excluded
        """
        # At least one category must remain included or there is nothing
        # to export.
        if False not in [exclude_alert_notification_settings,
                         exclude_performance_preference_settings,
                         exclude_performance_user_templates,
                         exclude_performance_metric_settings]:
            msg = ('Invalid Unisphere settings input parameters supplied, at '
                   'least one settings category must be not be excluded in '
                   'the input parameters.')
            LOG.error(msg)
            raise exception.InvalidInputException(msg)
        exclusion_list = list()
        if exclude_alert_notification_settings:
            exclusion_list.append(UNI_ALERT_SETTINGS)
        if exclude_performance_preference_settings:
            exclusion_list.append(UNI_PERF_PREF_SETTINGS)
        if exclude_performance_user_templates:
            exclusion_list.append(UNI_PERF_USER_SETTINGS)
        if exclude_performance_metric_settings:
            exclusion_list.append(UNI_PERF_METRIC_SETTINGS)
        # System settings are excluded wholesale; only Unisphere settings
        # are exported here.
        request_body = {
            FILE_PASSWORD: file_password, SRC_ARRAY: self.array_id,
            EXCLUDE_SYS_SETTINGS: [ALL_SETTINGS],
            EXCLUDE_UNI_SETTINGS: exclusion_list}
        return self._download_settings(
            request_body=request_body, dir_path=dir_path,
            file_name=file_name, return_binary=return_binary)

    def download_system_settings(
            self, file_password, dir_path=None, file_name=None,
            array_id=None, return_binary=False,
            exclude_alert_policy_settings=False,
            alert_level_notification_settings=False,
            exclude_system_threshold_settings=False,
            exclude_performance_threshold_settings=False):
        """Export System settings.

        - System settings: alert policies, alert level notifications,
          performance thresholds, system thresholds

        By default settings will be written to a zip file in the current
        working directory unless a supplied directory path and/or file name
        are provided. If any extension is provided in the file name it will
        be replaced with .zip before the data is written to file.

        If instead the file writing process should be handled outside of
        PyU4V or uploaded directly to Unisphere without any file handling
        set return_binary to True. The response dict will have the settings
        binary data included in key 'binary_data'.

        :param file_password: password to sign file (required) -- str
        :param dir_path: file save location -- str
        :param file_name: zip file name -- str
        :param array_id: array id -- str
        :param return_binary: return settings binary data -- bool
        :param exclude_alert_policy_settings: exclude alert policy
            settings -- bool
        :param alert_level_notification_settings: exclude alert level
            notification settings -- bool
        :param exclude_system_threshold_settings: exclude system threshold
            settings -- bool
        :param exclude_performance_threshold_settings: exclude performance
            threshold settings -- bool
        :returns: export details -- dict
        :raises InvalidInputException: if every settings category is
            excluded
        """
        # At least one category must remain included or there is nothing
        # to export.
        if False not in [exclude_alert_policy_settings,
                         exclude_system_threshold_settings,
                         exclude_performance_threshold_settings,
                         alert_level_notification_settings]:
            msg = ('Invalid system settings input parameters supplied, at '
                   'least one settings category must be not be excluded in '
                   'the input parameters.')
            LOG.error(msg)
            raise exception.InvalidInputException(msg)
        exclusion_list = list()
        if exclude_alert_policy_settings:
            exclusion_list.append(SYS_ALERT_SETTINGS)
        if alert_level_notification_settings:
            exclusion_list.append(SYS_ALERT_NOTIFI_SETTINGS)
        if exclude_system_threshold_settings:
            exclusion_list.append(SYS_THRESH_SETTINGS)
        if exclude_performance_threshold_settings:
            exclusion_list.append(SYS_PERF_THRESH_SETTINGS)
        array_id = self.array_id if not array_id else array_id
        # Unisphere settings are excluded wholesale; only system settings
        # are exported here.
        request_body = {
            FILE_PASSWORD: file_password, SRC_ARRAY: array_id,
            EXCLUDE_SYS_SETTINGS: exclusion_list,
            EXCLUDE_UNI_SETTINGS: [ALL_SETTINGS]}
        return self._download_settings(
            request_body=request_body, dir_path=dir_path,
            file_name=file_name, return_binary=return_binary)

    def upload_settings(self, file_password, file_path=None, array_id=None,
                        binary_data=None):
        """Upload Unisphere and/or system settings to Unisphere.

        Allows for importing a zip file or binary data that contains
        settings that were previously exported from Unisphere.

        The settings that a file upload may include are:

        - All settings:
            - Unisphere settings: alert notifications, performance metrics,
              performance preferences, performance user templates
            - System settings: alert policies, alert level notifications,
              performance thresholds, system thresholds

        A password that was specified during export needs to be provided
        during import operation. This is to assure that the imported file
        has not been tampered with.

        It is possible to upload system settings for more than one array,
        to do so pass a list of array IDs in to array_id input parameter.
        For Unisphere settings an array ID is not required.

        :param file_password: password that file has been signed
            with -- str
        :param file_path: full file location -- str
        :param array_id: array id -- str
        :param binary_data: binary settings data -- bytes
        :returns: upload details -- dict
        :raises InvalidInputException: if binary_data is not bytes or
            file_path does not point at a file
        """
        # Work around: We need to provide the array ID for all upload
        # requests
        array_id = self.array_id if not array_id else array_id
        array_id = ','.join(array_id) if isinstance(
            array_id, list) else array_id
        if binary_data:
            # Explicit type check instead of assert so validation survives
            # python -O; exception type is unchanged for callers.
            if not isinstance(binary_data, bytes):
                msg = ('You must provide valid bytes data before upload to '
                       'Unisphere can proceed.')
                LOG.error(msg)
                raise exception.InvalidInputException(msg)
            form_data = {
                ZIP_FILE: binary_data, TGT_ARRAYS: array_id,
                FILE_PASSWORD: file_password}
            return self.common.upload_file(
                category=SYSTEM, resource_level=SETTINGS,
                resource_type=IMPORT_FILE, form_data=form_data)
        try:
            f_path = Path(file_path)
            is_file = f_path.is_file()
        except TypeError as error:
            msg = ('Invalid file path supplied for settings upload '
                   'location: {f}'.format(f=file_path))
            LOG.error(msg)
            raise exception.InvalidInputException(msg) from error
        if not is_file:
            msg = ('Invalid file path supplied for settings upload '
                   'location: {f}'.format(f=file_path))
            LOG.error(msg)
            raise exception.InvalidInputException(msg)
        LOG.info('Uploading settings from {p}'.format(p=f_path))
        # Fix: use a context manager so the settings file handle is closed
        # once the upload request completes (previously leaked).
        with open(f_path, FILE_READ_MODE) as data:
            form_data = {
                ZIP_FILE: data, TGT_ARRAYS: array_id,
                FILE_PASSWORD: file_password}
            return self.common.upload_file(
                category=SYSTEM, resource_level=SETTINGS,
                resource_type=IMPORT_FILE, form_data=form_data)

    def get_audit_log_list(
            self, start_time, end_time, array_id=None, user_name=None,
            host_name=None, client_host=None, message=None, record_id=None,
            activity_id=None, application_id=None, application_version=None,
            task_id=None, process_id=None, vendor_id=None, os_type=None,
            os_revision=None, api_library=None, api_version=None,
            audit_class=None, action_code=None, function_class=None):
        """Get a list of audit logs from Unisphere between start and end date.

        Retrieve a list of audit logs from Unisphere, it is possible to
        filter this list through the input parameters. Due to the potential
        to return large amounts of results both start and end time are
        required.

        :param start_time: timestamp in milliseconds since epoch -- int
        :param end_time: timestamp in milliseconds since epoch -- int
        :param array_id: array serial number -- str
        :param user_name: filter by username -- str
        :param host_name: filter by host name -- str
        :param client_host: filter by client host -- str
        :param message: filter by message -- str
        :param record_id: filter by record id -- int
        :param activity_id: filter by activity id -- str
        :param application_id: filter by application id -- str
        :param application_version: filter by application version -- str
        :param task_id: filter by task id -- str
        :param process_id: filter by process id -- str
        :param vendor_id: filter by vendor id -- str
        :param os_type: filter by OS type -- str
        :param os_revision: filter by OS revision -- str
        :param api_library: filter by API library -- str
        :param api_version: filter by API version -- str
        :param audit_class: filter by audit class -- str
        :param action_code: filter by action code -- str
        :param function_class: filter by function class -- str
        :returns: audit logs -- list
        """
        array_id = self.array_id if not array_id else array_id
        start_time = time_handler.format_time_input(
            start_time, return_seconds=True)
        end_time = time_handler.format_time_input(
            end_time, return_seconds=True)
        # If the time delta is longer than 24 hours...
        if (end_time - start_time) > (60 * 60 * 24):
            LOG.warning(
                'It is not recommended that queries with large time ranges '
                'are used to retrieve system audit logs. Please consider '
                'refining your search. If large time ranges are required, '
                'please use sparingly and not in frequently run scripts.')
        target_uri = self.common.build_target_uri(
            category=SYSTEM, resource_level=SYMMETRIX,
            resource_level_id=array_id, resource_type=AUDIT_LOG_RECORD)
        # Form query params manually until audit log endpoint is updated to
        # accept a dict of filter params - limited due to requirement of
        # more than one instance of the same parameter key which is not
        # possible with python dictionaries
        query_uri = '?'
        query_uri += 'entry_date=>{start}&entry_date=<{end}'.format(
            start=start_time, end=end_time)
        # Data-driven append replaces eighteen identical if-blocks; output
        # order and formatting are unchanged.
        optional_filters = (
            ('user_name', user_name), ('host_name', host_name),
            ('client_host', client_host), ('message', message),
            ('record_id', record_id), ('activity_id', activity_id),
            ('application_id', application_id),
            ('application_version', application_version),
            ('task_id', task_id), ('process_id', process_id),
            ('vendor_id', vendor_id), ('os_type', os_type),
            ('os_revision', os_revision), ('api_library', api_library),
            ('api_version', api_version), ('audit_class', audit_class),
            ('action_code', action_code), ('function_class', function_class))
        for key, value in optional_filters:
            if value:
                query_uri += '&{k}={x}'.format(k=key, x=value)
        if query_uri != '?':
            target_uri += query_uri
        response = self.common.get_request(
            target_uri=target_uri, resource_type=AUDIT_LOG_RECORD)
        if response.get(COUNT, 0) > 0:
            return self.common.get_iterator_results(response)
        else:
            return list()

    def get_audit_log_record(self, record_id, array_id=None):
        """Get audit log details for a specific record.

        :param record_id: audit log record id -- int
        :param array_id: array serial number -- str
        :returns: audit log record details -- dict
        """
        array_id = self.array_id if not array_id else array_id
        target_uri = self.common.build_target_uri(
            category=SYSTEM, resource_level=SYMMETRIX,
            resource_level_id=array_id, resource_type=AUDIT_LOG_RECORD,
            resource_type_id=record_id)
        response = self.common.get_request(
            target_uri=target_uri, resource_type=AUDIT_LOG_RECORD)
        return response

    def download_audit_log_record(self, array_id=None, return_binary=False,
                                  dir_path=None, file_name=None):
        """Download audit log record for the last week in PDF format.

        :param array_id: array serial number -- str
        :param return_binary: return binary data instead of writing audit
            log record pdf to file -- bool
        :param dir_path: file write directory path -- str
        :param file_name: file name -- str
        :returns: download details -- dict
        """
        array_id = self.array_id if not array_id else array_id
        date_time = datetime.fromtimestamp(time.time())
        if not file_name:
            file_name = '{fn}-{d}.{e}'.format(
                fn=AUDIT_LOG_FILENAME_TEMPLATE,
                d=date_time.strftime(STR_TIME_FORMAT), e=PDF_SUFFIX)
        req_body = {AUDIT_LOG_FILENAME: file_name}
        # NOTE(review): unlike _download_settings there is no falsy-response
        # guard here; a failed download would raise on response.content --
        # confirm whether download_file can return None on this path.
        response = self.common.download_file(
            category=SYSTEM, resource_level=SYMMETRIX,
            resource_level_id=array_id, resource_type=AUDIT_LOG_RECORD,
            resource=EXPORT_FILE, payload=req_body)
        return_dict = dict()
        # Return binary data, do not write to file
        if return_binary:
            return_dict[BINARY_DATA] = response.content
        # Write to file
        else:
            file_path = file_handler.write_binary_data_to_file(
                data=response, file_extension=PDF_SUFFIX,
                file_name=file_name, dir_path=dir_path)
            return_dict[AUDIT_RECORD_PATH] = file_path
        return_dict[SUCCESS] = True
        return_dict[AUDIT_RECORD_TIME] = date_time
        LOG.info('The audit log download request was successful.')
        return return_dict