Exemplo n.º 1
0
class NetAppOntapAggregate(object):
    ''' object initialize and class methods '''
    def __init__(self):
        """
        Build the Ansible argument spec, instantiate the AnsibleModule and
        open the ZAPI connection.

        Fails early when mirror_disks is supplied without disks, or when
        the python NetApp-Lib package is not installed.
        """
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(
            dict(name=dict(required=True, type='str'),
                 disks=dict(required=False, type='list', elements='str'),
                 disk_count=dict(required=False, type='int', default=None),
                 disk_size=dict(required=False, type='int'),
                 disk_size_with_unit=dict(required=False, type='str'),
                 disk_type=dict(required=False,
                                choices=[
                                    'ATA', 'BSAS', 'FCAL', 'FSAS', 'LUN',
                                    'MSATA', 'SAS', 'SSD', 'VMDISK'
                                ]),
                 from_name=dict(required=False, type='str'),
                 mirror_disks=dict(required=False, type='list',
                                   elements='str'),
                 nodes=dict(required=False, type='list', elements='str'),
                 is_mirrored=dict(required=False, type='bool'),
                 raid_size=dict(required=False, type='int'),
                 raid_type=dict(
                     required=False,
                     choices=['raid4', 'raid_dp', 'raid_tec', 'raid_0']),
                 service_state=dict(required=False,
                                    choices=['online', 'offline']),
                 spare_pool=dict(required=False, choices=['Pool0', 'Pool1']),
                 state=dict(required=False,
                            choices=['present', 'absent'],
                            default='present'),
                 unmount_volumes=dict(required=False, type='bool'),
                 wait_for_online=dict(required=False,
                                      type='bool',
                                      default=False),
                 time_out=dict(required=False, type='int', default=100),
                 object_store_name=dict(required=False, type='str'),
                 snaplock_type=dict(
                     required=False,
                     type='str',
                     choices=['compliance', 'enterprise', 'non_snaplock']),
                 ignore_pool_checks=dict(required=False, type='bool')))

        # unmount_volumes is mandatory when taking the aggregate offline;
        # is_mirrored, explicit disks and spare_pool are incompatible choices
        self.module = AnsibleModule(argument_spec=self.argument_spec,
                                    required_if=[
                                        ('service_state', 'offline',
                                         ['unmount_volumes']),
                                    ],
                                    mutually_exclusive=[
                                        ('is_mirrored', 'disks'),
                                        ('is_mirrored', 'mirror_disks'),
                                        ('is_mirrored', 'spare_pool'),
                                        ('spare_pool', 'disks'),
                                        ('disk_count', 'disks'),
                                        ('disk_size', 'disk_size_with_unit')
                                    ],
                                    supports_check_mode=True)

        self.na_helper = NetAppModule()
        self.using_vserver_msg = None  # This module should be run as cluster admin
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # mirror_disks only makes sense together with an explicit disks list
        if self.parameters.get(
                'mirror_disks'
        ) is not None and self.parameters.get('disks') is None:
            self.module.fail_json(
                msg="mirror_disks require disks options to be set")
        if HAS_NETAPP_LIB is False:
            self.module.fail_json(
                msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)

    def aggr_get_iter(self, name):
        """
        Run an aggr-get-iter ZAPI query filtered on the aggregate name.

        :param name: Name of the aggregate
        :return: the NaElement result when the call succeeds, None when
                 the aggregate does not exist (ZAPI error 13040)
        """
        request = netapp_utils.zapi.NaElement('aggr-get-iter')
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(
            netapp_utils.zapi.NaElement.create_node_with_children(
                'aggr-attributes', **{'aggregate-name': name}))
        request.add_child_elem(query)
        try:
            return self.server.invoke_successfully(request,
                                                   enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            # Error 13040 denotes an aggregate not being found: report
            # "not found" instead of failing the module.
            if to_native(error.code) != "13040":
                msg = to_native(error)
                if self.using_vserver_msg is not None:
                    msg += '.  Added info: %s.' % self.using_vserver_msg
                self.module.fail_json(msg=msg,
                                      exception=traceback.format_exc())
        return None

    def get_aggr(self, name=None):
        """
        Fetch details if aggregate exists.
        :param name: Name of the aggregate to be fetched
        :return:
            Dictionary of current details if aggregate found
            None if aggregate is not found
        """
        if name is None:
            name = self.parameters['name']
        aggr_get = self.aggr_get_iter(name)
        if (aggr_get and aggr_get.get_child_by_name('num-records')
                and int(aggr_get.get_child_content('num-records')) >= 1):
            current_aggr = dict()
            attr = aggr_get.get_child_by_name(
                'attributes-list').get_child_by_name('aggr-attributes')
            current_aggr['service_state'] = attr.get_child_by_name(
                'aggr-raid-attributes').get_child_content('state')
            if attr.get_child_by_name(
                    'aggr-raid-attributes').get_child_content('disk-count'):
                current_aggr['disk_count'] = int(
                    attr.get_child_by_name('aggr-raid-attributes').
                    get_child_content('disk-count'))
            return current_aggr
        return None

    def disk_get_iter(self, name):
        """
        Return storage-disk-get-iter query results
        Filter disk list by aggregate name, and only reports disk-name and plex-name
        :param name: Name of the aggregate
        :return: NaElement
        """

        disk_get_iter = netapp_utils.zapi.NaElement('storage-disk-get-iter')
        # filter the disk list on the owning aggregate name
        query_details = {
            'query': {
                'storage-disk-info': {
                    'disk-raid-info': {
                        'disk-aggregate-info': {
                            'aggregate-name': name
                        }
                    }
                }
            }
        }
        disk_get_iter.translate_struct(query_details)
        # restrict returned attributes to disk-name and plex-name
        # NOTE(review): 'disk_aggregate_info' below uses underscores while the
        # query above uses hyphenated 'disk-aggregate-info' -- confirm
        # translate_struct normalizes underscores, otherwise this
        # desired-attributes filter may not match the ZAPI element name
        attributes = {
            'desired-attributes': {
                'storage-disk-info': {
                    'disk-name': None,
                    'disk-raid-info': {
                        'disk_aggregate_info': {
                            'plex-name': None
                        }
                    }
                }
            }
        }
        disk_get_iter.translate_struct(attributes)

        result = None
        try:
            result = self.server.invoke_successfully(disk_get_iter,
                                                     enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg=to_native(error),
                                  exception=traceback.format_exc())
        return result

    def get_aggr_disks(self, name):
        """
        Fetch disks that are used for this aggregate.
        :param name: Name of the aggregate to be fetched
        :return:
            list of tuples (disk-name, plex-name)
            empty list if aggregate is not found
        """
        disks = list()
        aggr_get = self.disk_get_iter(name)
        if (aggr_get and aggr_get.get_child_by_name('num-records')
                and int(aggr_get.get_child_content('num-records')) >= 1):
            attr = aggr_get.get_child_by_name('attributes-list')
            disks = [
                (disk_info.get_child_content('disk-name'),
                 disk_info.get_child_by_name(
                     'disk-raid-info').get_child_by_name(
                         'disk-aggregate-info').get_child_content('plex-name'))
                for disk_info in attr.get_children()
            ]
        return disks

    def object_store_get_iter(self):
        """
        Run an aggr-object-store-get-iter ZAPI query for the configured
        object store / aggregate pair.

        :return: NaElement result, or None; fails the module on API error
        """
        request = netapp_utils.zapi.NaElement('aggr-object-store-get-iter')
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(
            netapp_utils.zapi.NaElement.create_node_with_children(
                'object-store-information', **{
                    'object-store-name':
                        self.parameters.get('object_store_name'),
                    'aggregate': self.parameters.get('name')
                }))
        request.add_child_elem(query)
        try:
            return self.server.invoke_successfully(request,
                                                   enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg=to_native(error),
                                  exception=traceback.format_exc())
        return None

    def get_object_store(self):
        """
        Fetch details if object store attached to the given aggregate exists.
        :return:
            Dictionary of current details if object store attached to the given aggregate is found
            None if object store is not found
        """
        object_store_get = self.object_store_get_iter()
        if (object_store_get
                and object_store_get.get_child_by_name('num-records') and
                int(object_store_get.get_child_content('num-records')) >= 1):
            current_object_store = dict()
            attr = object_store_get.get_child_by_name('attributes-list').\
                get_child_by_name('object-store-information')
            current_object_store['object_store_name'] = attr.get_child_content(
                'object-store-name')
            return current_object_store
        return None

    def aggregate_online(self):
        """
        Force an offline aggregate back online via aggr-online.

        :return: None
        """
        aggr_name = self.parameters['name']
        request = netapp_utils.zapi.NaElement.create_node_with_children(
            'aggr-online', **{
                'aggregate': aggr_name,
                'force-online': 'true'
            })
        try:
            self.server.invoke_successfully(request, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(
                msg='Error changing the state of aggregate %s to %s: %s'
                % (aggr_name, self.parameters['service_state'],
                   to_native(error)),
                exception=traceback.format_exc())

    def aggregate_offline(self):
        """
        Take an online aggregate offline via aggr-offline.

        Never forces the offline; unmount-volumes is driven by the module
        parameter of the same name.

        :return: None
        """
        aggr_name = self.parameters['name']
        request = netapp_utils.zapi.NaElement.create_node_with_children(
            'aggr-offline', **{
                'aggregate': aggr_name,
                'force-offline': 'false',
                'unmount-volumes': str(self.parameters['unmount_volumes'])
            })
        try:
            self.server.invoke_successfully(request, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(
                msg='Error changing the state of aggregate %s to %s: %s'
                % (aggr_name, self.parameters['service_state'],
                   to_native(error)),
                exception=traceback.format_exc())

    @staticmethod
    def get_disks_or_mirror_disks_object(name, disks):
        '''
        Create a ZAPI element holding a list of disks.

        :param name: name of the enclosing element ('disks' or 'mirror-disks')
        :param disks: iterable of disk names
        :return: NaElement with one disk-info/name child per disk
        '''
        disks_obj = netapp_utils.zapi.NaElement(name)
        for disk in disks:
            disk_info_obj = netapp_utils.zapi.NaElement('disk-info')
            disk_info_obj.add_new_child('name', disk)
            disks_obj.add_child_elem(disk_info_obj)
        return disks_obj

    def create_aggr(self):
        """
        Create the aggregate, optionally waiting for it to come online.

        Builds the aggr-create request from the module parameters, adding
        nodes, disks and mirror-disks children when provided.  When
        wait_for_online is set, polls every 10 seconds until the aggregate
        reports 'online' or time_out expires.  Exits with a warning (not a
        failure) when ONTAP provisioned a different disk count than
        requested.

        :return: None
        """
        options = {'aggregate': self.parameters['name']}
        if self.parameters.get('disk_count'):
            options['disk-count'] = str(self.parameters['disk_count'])
        if self.parameters.get('disk_type'):
            options['disk-type'] = self.parameters['disk_type']
        if self.parameters.get('raid_size'):
            options['raid-size'] = str(self.parameters['raid_size'])
        # note: raid_type was previously copied twice; one copy suffices
        if self.parameters.get('raid_type'):
            options['raid-type'] = self.parameters['raid_type']
        if self.parameters.get('disk_size'):
            options['disk-size'] = str(self.parameters['disk_size'])
        if self.parameters.get('disk_size_with_unit'):
            options['disk-size-with-unit'] = str(
                self.parameters['disk_size_with_unit'])
        if self.parameters.get('is_mirrored'):
            options['is-mirrored'] = str(self.parameters['is_mirrored'])
        if self.parameters.get('spare_pool'):
            options['spare-pool'] = self.parameters['spare_pool']
        if self.parameters.get('snaplock_type'):
            options['snaplock-type'] = self.parameters['snaplock_type']
        if self.parameters.get('ignore_pool_checks'):
            options['ignore-pool-checks'] = str(
                self.parameters['ignore_pool_checks'])
        aggr_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'aggr-create', **options)
        if self.parameters.get('nodes'):
            nodes_obj = netapp_utils.zapi.NaElement('nodes')
            aggr_create.add_child_elem(nodes_obj)
            for node in self.parameters['nodes']:
                nodes_obj.add_new_child('node-name', node)
        if self.parameters.get('disks'):
            aggr_create.add_child_elem(
                self.get_disks_or_mirror_disks_object(
                    'disks', self.parameters.get('disks')))
        if self.parameters.get('mirror_disks'):
            aggr_create.add_child_elem(
                self.get_disks_or_mirror_disks_object(
                    'mirror-disks', self.parameters.get('mirror_disks')))

        try:
            self.server.invoke_successfully(aggr_create,
                                            enable_tunneling=False)
            if self.parameters.get('wait_for_online'):
                # poll every 10s; round off time_out to 10-second slots
                retries = (self.parameters['time_out'] + 5) / 10
                current = self.get_aggr()
                status = None if current is None else current['service_state']
                while status != 'online' and retries > 0:
                    time.sleep(10)
                    retries = retries - 1
                    current = self.get_aggr()
                    status = None if current is None else current[
                        'service_state']
            else:
                current = self.get_aggr()
            # warn (do not fail) when the created disk_count differs from
            # the requested one
            if current is not None and current.get(
                    'disk_count') != self.parameters.get('disk_count'):
                self.module.exit_json(
                    changed=self.na_helper.changed,
                    warnings=
                    "Aggregate created with mismatched disk_count: created %s not %s"
                    % (current.get('disk_count'),
                       self.parameters.get('disk_count')))
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error provisioning aggregate %s: %s" %
                                  (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def delete_aggr(self):
        """
        Destroy the aggregate named in the module parameters.

        :return: None
        """
        aggr_name = self.parameters['name']
        request = netapp_utils.zapi.NaElement.create_node_with_children(
            'aggr-destroy', **{'aggregate': aggr_name})
        try:
            self.server.invoke_successfully(request, enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(
                msg="Error removing aggregate %s: %s" % (aggr_name,
                                                         to_native(error)),
                exception=traceback.format_exc())

    def rename_aggregate(self):
        """
        Rename the aggregate from parameters['from_name'] to
        parameters['name'].
        """
        old_name = self.parameters['from_name']
        new_name = self.parameters['name']
        request = netapp_utils.zapi.NaElement.create_node_with_children(
            'aggr-rename', **{
                'aggregate': old_name,
                'new-aggregate-name': new_name
            })
        try:
            self.server.invoke_successfully(request, enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(
                msg="Error renaming aggregate %s: %s" % (old_name,
                                                         to_native(error)),
                exception=traceback.format_exc())

    def modify_aggr(self, modify):
        """
        Modify state of the aggregate
        :param modify: dictionary of parameters to be modified
        :return: None
        """
        if modify.get('service_state') == 'offline':
            self.aggregate_offline()
        else:
            disk_size = 0
            disk_size_with_unit = None
            if modify.get('service_state') == 'online':
                self.aggregate_online()
            if modify.get('disk_size'):
                disk_size = modify.get('disk_size')
            if modify.get('disk_size_with_unit'):
                disk_size_with_unit = modify.get('disk_size_with_unit')
            if modify.get('disk_count'):
                self.add_disks(modify['disk_count'],
                               disk_size=disk_size,
                               disk_size_with_unit=disk_size_with_unit)
            if modify.get('disks_to_add') or modify.get('mirror_disks_to_add'):
                self.add_disks(0, modify.get('disks_to_add'),
                               modify.get('mirror_disks_to_add'))

    def attach_object_store_to_aggr(self):
        """
        Attach the configured object store to the aggregate.

        :return: None
        """
        aggr_name = self.parameters['name']
        store_name = self.parameters['object_store_name']
        request = netapp_utils.zapi.NaElement.create_node_with_children(
            'aggr-object-store-attach', **{
                'aggregate': aggr_name,
                'object-store-name': store_name
            })
        try:
            self.server.invoke_successfully(request, enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(
                msg="Error attaching object store %s to aggregate %s: %s" %
                (store_name, aggr_name, to_native(error)),
                exception=traceback.format_exc())

    def add_disks(self,
                  count=0,
                  disks=None,
                  mirror_disks=None,
                  disk_size=0,
                  disk_size_with_unit=None):
        """
        Add additional disks to the aggregate, by count and/or by explicit
        disk lists.

        :param count: number of disks to add (0 to rely on explicit lists)
        :param disks: explicit list of disk names to add
        :param mirror_disks: explicit list of mirror disk names to add
        :param disk_size: requested disk size, 0 to omit
        :param disk_size_with_unit: requested disk size with unit, None to omit
        :return: None
        """
        options = {'aggregate': self.parameters['name']}
        if count:
            options['disk-count'] = str(count)
        # ignore-pool-checks is only relevant with an explicit disk list
        if disks and self.parameters.get('ignore_pool_checks'):
            options['ignore-pool-checks'] = str(
                self.parameters['ignore_pool_checks'])
        if disk_size:
            options['disk-size'] = str(disk_size)
        if disk_size_with_unit:
            options['disk-size-with-unit'] = disk_size_with_unit
        request = netapp_utils.zapi.NaElement.create_node_with_children(
            'aggr-add', **options)
        for tag, disk_list in (('disks', disks),
                               ('mirror-disks', mirror_disks)):
            if disk_list:
                request.add_child_elem(
                    self.get_disks_or_mirror_disks_object(tag, disk_list))
        try:
            self.server.invoke_successfully(request, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(
                msg='Error adding additional disks to aggregate %s: %s' %
                (self.parameters['name'], to_native(error)),
                exception=traceback.format_exc())

    def asup_log_for_cserver(self, event_name):
        """
        Fetch admin vserver for the given cluster
        Create and Autosupport log event with the given module name
        :param event_name: Name of the event log
        :return: None
        """
        cserver = netapp_utils.get_cserver(self.server)
        if cserver is None:
            # no cluster vserver found: log on the current connection and
            # remember the situation so later error messages can mention it
            server = self.server
            self.using_vserver_msg = netapp_utils.ERROR_MSG['no_cserver']
            event_name += ':error_no_cserver'
        else:
            server = netapp_utils.setup_na_ontap_zapi(module=self.module,
                                                      vserver=cserver)
        netapp_utils.ems_log_event(event_name, server)

    def map_plex_to_primary_and_mirror(self, plex_disks, disks, mirror_disks):
        '''
        we have N plexes, and disks, and maybe mirror_disks
        we're trying to find which plex is used for disks, and which one, if applicable, for mirror_disks
        :return: a tuple with the names of the two plexes (disks_plex, mirror_disks_plex)
        the second one can be None
        '''
        disks_plex = None
        mirror_disks_plex = None
        error = None
        for plex in plex_disks:
            common = set(plex_disks[plex]).intersection(set(disks))
            if common:
                if disks_plex is None:
                    disks_plex = plex
                else:
                    error = 'found overlapping plexes: %s and %s' % (
                        disks_plex, plex)
            if mirror_disks is not None:
                common = set(plex_disks[plex]).intersection(set(mirror_disks))
                if common:
                    if mirror_disks_plex is None:
                        mirror_disks_plex = plex
                    else:
                        error = 'found overlapping mirror plexes: %s and %s' % (
                            mirror_disks_plex, plex)
        if error is None:
            # make sure we found a match
            if disks_plex is None:
                error = 'cannot not match disks with current aggregate disks'
            if mirror_disks is not None and mirror_disks_plex is None:
                if error is not None:
                    error += ', and '
                error = 'cannot not match mirror_disks with current aggregate disks'
        if error:
            self.module.fail_json(
                msg="Error mapping disks for aggregate %s: %s.  Found: %s" %
                (self.parameters['name'], error, str(plex_disks)))
        return disks_plex, mirror_disks_plex

    def get_disks_to_add(self, aggr_name, disks, mirror_disks):
        '''
        Get list of disks used by the aggregate, as primary and mirror.
        Report error if:
          the plexes in use cannot be matched with user inputs (we expect some overlap)
          the user request requires some disks to be removed (not supported)
        : return: a tuple of two lists of disks: disks_to_add, mirror_disks_to_add
        '''
        # let's see if we need to add disks
        disks_in_use = self.get_aggr_disks(aggr_name)
        # we expect a list of tuples (disk_name, plex_name), if there is a mirror, we should have 2 plexes
        # let's get a list of disks for each plex
        plex_disks = dict()
        for disk_name, plex_name in disks_in_use:
            plex_disks.setdefault(plex_name, []).append(disk_name)
        # find who is who
        disks_plex, mirror_disks_plex = self.map_plex_to_primary_and_mirror(
            plex_disks, disks, mirror_disks)
        # Now that we know what is which, find what needs to be removed (error), and what needs to be added
        disks_to_remove = [
            disk for disk in plex_disks[disks_plex] if disk not in disks
        ]
        if mirror_disks_plex:
            disks_to_remove.extend([
                disk for disk in plex_disks[mirror_disks_plex]
                if disk not in mirror_disks
            ])
        if disks_to_remove:
            error = 'these disks cannot be removed: %s' % str(disks_to_remove)
            self.module.fail_json(
                msg=
                "Error removing disks is not supported.  Aggregate %s: %s.  In use: %s"
                % (aggr_name, error, str(plex_disks)))
        # finally, what's to be added
        disks_to_add = [
            disk for disk in disks if disk not in plex_disks[disks_plex]
        ]
        mirror_disks_to_add = list()
        if mirror_disks_plex:
            mirror_disks_to_add = [
                disk for disk in mirror_disks
                if disk not in plex_disks[mirror_disks_plex]
            ]
        if mirror_disks_to_add and not disks_to_add:
            self.module.fail_json(
                msg=
                "Error cannot add mirror disks %s without adding disks for aggregate %s.  In use: %s"
                % (str(mirror_disks_to_add), aggr_name, str(plex_disks)))
        if disks_to_add or mirror_disks_to_add:
            self.na_helper.changed = True

        return disks_to_add, mirror_disks_to_add

    def apply(self):
        """
        Apply action to the aggregate
        :return: None
        """
        self.asup_log_for_cserver("na_ontap_aggregate")
        object_store_cd_action = None
        current = self.get_aggr()
        # rename and create are mutually exclusive
        rename, cd_action, object_store_current = None, None, None
        if self.parameters.get('from_name'):
            rename = self.na_helper.is_rename_action(
                self.get_aggr(self.parameters['from_name']), current)
            if rename is None:
                self.module.fail_json(
                    msg="Error renaming: aggregate %s does not exist" %
                    self.parameters['from_name'])
        else:
            cd_action = self.na_helper.get_cd_action(current, self.parameters)
        modify = self.na_helper.get_modified_attributes(
            current, self.parameters)

        # when the aggregate exists and explicit disks were requested,
        # compute which disks (and mirror disks) must be added
        if cd_action is None and self.parameters.get(
                'disks') and current is not None:
            modify['disks_to_add'], modify['mirror_disks_to_add'] = \
                self.get_disks_to_add(self.parameters['name'], self.parameters['disks'], self.parameters.get('mirror_disks'))

        # disk_count can only grow; convert the requested total into a delta
        # NOTE(review): assumes current always carries 'disk_count' whenever
        # modify does -- confirm get_aggr reports it in that case
        if modify.get('disk_count'):
            if int(modify['disk_count']) < int(current['disk_count']):
                self.module.fail_json(
                    msg=
                    "specified disk_count is less than current disk_count. Only adding_disk is allowed."
                )
            else:
                modify['disk_count'] = modify['disk_count'] - current[
                    'disk_count']

        # object store attach only applies to an existing, non-renamed aggregate
        # NOTE(review): get_cd_action is given a string here instead of the
        # parameters dict used above -- verify NetAppModule supports this
        if self.parameters.get(
                'object_store_name') and cd_action is None and rename is None:
            object_store_current = self.get_object_store()
            object_store_cd_action = self.na_helper.get_cd_action(
                object_store_current, self.parameters.get('object_store_name'))

        if self.na_helper.changed:
            # in check mode, report the change without performing it
            if self.module.check_mode:
                pass
            else:
                if rename:
                    self.rename_aggregate()
                elif cd_action == 'create':
                    self.create_aggr()
                elif cd_action == 'delete':
                    self.delete_aggr()
                else:
                    if modify:
                        self.modify_aggr(modify)
                    if object_store_cd_action is not None:
                        self.attach_object_store_to_aggr()
        self.module.exit_json(changed=self.na_helper.changed)
Exemplo n.º 2
0
class ElementSWClusterSnmp(object):
    """
    Element Software Configure Element SW Cluster SnmpNetwork.

    Enables/disables SNMP on an Element SW cluster and configures either
    SNMP v2 networks or SNMP v3 USM users, depending on ``snmp_v3_enabled``.
    """

    def __init__(self):
        self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()

        self.argument_spec.update(dict(
            state=dict(type='str',
                       choices=['present', 'absent'],
                       default='present'),
            snmp_v3_enabled=dict(type='bool'),
            networks=dict(type='dict',
                          options=dict(
                              access=dict(type='str',
                                          choices=['ro', 'rw', 'rosys']),
                              cidr=dict(type='int', default=None),
                              community=dict(type='str', default=None),
                              network=dict(type='str', default=None))),
            usm_users=dict(type='dict',
                           options=dict(
                               access=dict(type='str',
                                           choices=['rouser', 'rwuser', 'rosys']),
                               name=dict(type='str', default=None),
                               password=dict(type='str', default=None),
                               passphrase=dict(type='str', default=None),
                               secLevel=dict(type='str',
                                             choices=['auth', 'noauth', 'priv']))),
        ))

        # usm_users is mandatory for v3, networks for v2 -- enforced here so
        # the attribute extraction below can index the sub-options directly.
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[('state', 'present', ['snmp_v3_enabled']),
                         ('snmp_v3_enabled', True, ['usm_users']),
                         ('snmp_v3_enabled', False, ['networks'])],
            supports_check_mode=True)

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        if self.parameters.get('state') == "present":
            if self.parameters.get('usm_users') is not None:
                # Getting the configuration details to configure SNMP Version3
                self.access_usm = self.parameters.get('usm_users')['access']
                self.name = self.parameters.get('usm_users')['name']
                self.password = self.parameters.get('usm_users')['password']
                self.passphrase = self.parameters.get('usm_users')['passphrase']
                self.secLevel = self.parameters.get('usm_users')['secLevel']
            if self.parameters.get('networks') is not None:
                # Getting the configuration details to configure SNMP Version2
                self.access_network = self.parameters.get('networks')['access']
                self.cidr = self.parameters.get('networks')['cidr']
                self.community = self.parameters.get('networks')['community']
                self.network = self.parameters.get('networks')['network']

        if HAS_SF_SDK is False:
            self.module.fail_json(
                msg="Unable to import the SolidFire Python SDK")
        else:
            self.sfe = netapp_utils.create_sf_connection(module=self.module)

    def enable_snmp(self):
        """
        Enable the SNMP feature on the cluster.
        """
        try:
            self.sfe.enable_snmp(
                snmp_v3_enabled=self.parameters.get('snmp_v3_enabled'))
        except Exception as exception_object:
            self.module.fail_json(msg='Error enabling snmp feature %s' %
                                  to_native(exception_object),
                                  exception=traceback.format_exc())

    def disable_snmp(self):
        """
        Disable the SNMP feature on the cluster.
        """
        try:
            self.sfe.disable_snmp()
        except Exception as exception_object:
            self.module.fail_json(msg='Error disabling snmp feature %s' %
                                  to_native(exception_object),
                                  exception=traceback.format_exc())

    def configure_snmp(self, actual_networks, actual_usm_users):
        """
        Configure the SNMP ACL with the given networks (v2) or USM users (v3).

        Exactly one of the two arguments is expected to be a populated dict;
        the other is None (see apply()).
        """
        try:
            self.sfe.set_snmp_acl(networks=[actual_networks],
                                  usm_users=[actual_usm_users])

        except Exception as exception_object:
            # NOTE: exception_object.message does not exist on Python 3
            # exceptions (PEP 352); use to_native() like the other methods
            # so the real error is reported instead of an AttributeError.
            self.module.fail_json(msg='Error Configuring snmp feature %s' %
                                  to_native(exception_object),
                                  exception=traceback.format_exc())

    def apply(self):
        """
        Cluster SNMP configuration: determine the required change(s) and,
        unless running in check mode, apply them.
        """
        changed = False
        result_message = None
        update_required = False
        version_change = False
        is_snmp_enabled = self.sfe.get_snmp_state().enabled

        if is_snmp_enabled is True:
            # IF SNMP is already enabled
            if self.parameters.get('state') == 'absent':
                # Checking for state change(s) here, and applying it later in the code allows us to support
                # check_mode
                changed = True

            elif self.parameters.get('state') == 'present':
                # Checking if SNMP configuration needs to be updated,
                is_snmp_v3_enabled = self.sfe.get_snmp_state().snmp_v3_enabled

                if is_snmp_v3_enabled != self.parameters.get(
                        'snmp_v3_enabled'):
                    # Checking if there any version changes required
                    version_change = True
                    changed = True

                if is_snmp_v3_enabled is True:
                    # Checking If snmp configuration for usm_users needs modification
                    if len(self.sfe.get_snmp_info().usm_users) == 0:
                        # If snmp is getting configured for first time
                        update_required = True
                        changed = True
                    else:
                        for usm_user in self.sfe.get_snmp_info().usm_users:
                            if usm_user.access != self.access_usm or usm_user.name != self.name or usm_user.password != self.password or \
                               usm_user.passphrase != self.passphrase or usm_user.sec_level != self.secLevel:
                                update_required = True
                                changed = True
                else:
                    # Checking If snmp configuration for networks needs modification
                    for snmp_network in self.sfe.get_snmp_info().networks:
                        if snmp_network.access != self.access_network or snmp_network.cidr != self.cidr or \
                           snmp_network.community != self.community or snmp_network.network != self.network:
                            update_required = True
                            changed = True

        else:
            if self.parameters.get('state') == 'present':
                changed = True

        result_message = ""

        if changed:
            if self.module.check_mode is True:
                result_message = "Check mode, skipping changes"

            else:
                if self.parameters.get('state') == "present":
                    # IF snmp is not enabled, then enable and configure snmp
                    if self.parameters.get('snmp_v3_enabled') is True:
                        # IF SNMP is enabled with version 3
                        usm_users = {
                            'access': self.access_usm,
                            'name': self.name,
                            'password': self.password,
                            'passphrase': self.passphrase,
                            'secLevel': self.secLevel
                        }
                        networks = None
                    else:
                        # IF SNMP is enabled with version 2
                        usm_users = None
                        networks = {
                            'access': self.access_network,
                            'cidr': self.cidr,
                            'community': self.community,
                            'network': self.network
                        }

                    if is_snmp_enabled is False or version_change is True:
                        # Enable and configure snmp
                        self.enable_snmp()
                        self.configure_snmp(networks, usm_users)
                        result_message = "SNMP is enabled and configured"

                    elif update_required is True:
                        # If snmp is already enabled, update the configuration if required
                        self.configure_snmp(networks, usm_users)
                        result_message = "SNMP is configured"

                elif is_snmp_enabled is True and self.parameters.get(
                        'state') == "absent":
                    # If snmp is enabled and state is absent, disable snmp
                    self.disable_snmp()
                    result_message = "SNMP is disabled"

        self.module.exit_json(changed=changed, msg=result_message)
class NetAppOntapVolumeEfficiency(object):
    """
    Creates, Modifies and Disables a Volume Efficiency.

    Works over REST (private CLI passthrough) when available, otherwise
    falls back to ZAPI (sis-* APIs).
    """

    def __init__(self):
        """
        Initialize the ONTAP Volume Efficiency class.
        """
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False,
                       choices=['present', 'absent'],
                       default='present'),
            vserver=dict(required=True, type='str'),
            path=dict(required=True, type='str'),
            schedule=dict(required=False, type='str'),
            policy=dict(required=False,
                        choices=['auto', 'default', 'inline-only', '-'],
                        type='str'),
            enable_inline_compression=dict(required=False, type='bool'),
            enable_compression=dict(required=False, type='bool'),
            enable_inline_dedupe=dict(required=False, type='bool'),
            enable_data_compaction=dict(required=False, type='bool'),
            enable_cross_volume_inline_dedupe=dict(required=False,
                                                   type='bool'),
            enable_cross_volume_background_dedupe=dict(required=False,
                                                       type='bool')))

        self.module = AnsibleModule(argument_spec=self.argument_spec,
                                    supports_check_mode=True,
                                    mutually_exclusive=[('policy', 'schedule')])

        # set up variables
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # 'enabled' is the desired efficiency state derived from 'state';
        # it is compared against the current state in apply().
        if self.parameters['state'] == 'present':
            self.parameters['enabled'] = 'enabled'
        else:
            self.parameters['enabled'] = 'disabled'

        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()

        if not self.use_rest:
            if HAS_NETAPP_LIB is False:
                self.module.fail_json(
                    msg="the python NetApp-Lib module is required")
            else:
                self.server = netapp_utils.setup_na_ontap_zapi(
                    module=self.module, vserver=self.parameters['vserver'])

    def get_volume_efficiency(self):
        """
        Get the storage efficiency settings for a given path.
        :return: dict of settings if the path exists, None if not
        """

        return_value = None

        if self.use_rest:
            api = 'private/cli/volume/efficiency'
            query = {
                'fields': 'path,volume,state,schedule,compression,inline_compression,inline_dedupe,policy,data_compaction,'
                          'cross_volume_inline_dedupe,cross_volume_background_dedupe',
                'path': self.parameters['path'],
                'vserver': self.parameters['vserver']
            }
            message, error = self.rest_api.get(api, query)

            if error:
                self.module.fail_json(msg=error)
            if len(message.keys()) == 0:
                return None
            if 'records' in message and len(message['records']) == 0:
                return None
            if 'records' not in message:
                error = "Unexpected response in api call from %s: %s" % (
                    api, repr(message))
                self.module.fail_json(msg=error)
            record = message['records'][0]
            return_value = {
                'path': record['path'],
                'enabled': record['state'],
                'schedule': record['schedule'],
                'enable_inline_compression': record['inline_compression'],
                'enable_compression': record['compression'],
                'enable_inline_dedupe': record['inline_dedupe'],
                'enable_data_compaction': record['data_compaction'],
                'enable_cross_volume_inline_dedupe': record['cross_volume_inline_dedupe'],
                'enable_cross_volume_background_dedupe': record['cross_volume_background_dedupe']
            }

            # 'policy' is absent from the record when no policy is assigned;
            # '-' is the CLI's representation of "no policy".
            if 'policy' in record:
                return_value['policy'] = record['policy']
            else:
                return_value['policy'] = '-'
            return return_value

        else:

            sis_get_iter = netapp_utils.zapi.NaElement('sis-get-iter')
            sis_status_info = netapp_utils.zapi.NaElement('sis-status-info')
            sis_status_info.add_new_child('path', self.parameters['path'])
            query = netapp_utils.zapi.NaElement('query')
            query.add_child_elem(sis_status_info)
            sis_get_iter.add_child_elem(query)

            try:
                # NOTE: the invoke must be inside the try so that API errors
                # (NaApiError) are reported via fail_json instead of escaping
                # as an unformatted traceback.
                result = self.server.invoke_successfully(sis_get_iter, True)

                if result.get_child_by_name('attributes-list'):
                    sis_status_attributes = result['attributes-list'][
                        'sis-status-info']
                    return_value = {
                        'path': sis_status_attributes['path'],
                        'enabled': sis_status_attributes['state'],
                        'schedule': sis_status_attributes['schedule'],
                        'enable_inline_compression': self.na_helper.get_value_for_bool(
                            True, sis_status_attributes.get_child_content(
                                'is-inline-compression-enabled')),
                        'enable_compression': self.na_helper.get_value_for_bool(
                            True, sis_status_attributes.get_child_content(
                                'is-compression-enabled')),
                        'enable_inline_dedupe': self.na_helper.get_value_for_bool(
                            True, sis_status_attributes.get_child_content(
                                'is-inline-dedupe-enabled')),
                        'enable_data_compaction': self.na_helper.get_value_for_bool(
                            True, sis_status_attributes.get_child_content(
                                'is-data-compaction-enabled')),
                        'enable_cross_volume_inline_dedupe': self.na_helper.get_value_for_bool(
                            True, sis_status_attributes.get_child_content(
                                'is-cross-volume-inline-dedupe-enabled')),
                        'enable_cross_volume_background_dedupe': self.na_helper.get_value_for_bool(
                            True, sis_status_attributes.get_child_content(
                                'is-cross-volume-background-dedupe-enabled'))
                    }

                    if sis_status_attributes.get_child_by_name('policy'):
                        return_value['policy'] = sis_status_attributes['policy']
                    else:
                        return_value['policy'] = '-'

            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(
                    msg='Error getting volume efficiency for path %s on vserver %s: %s'
                    % (self.parameters['path'], self.parameters['vserver'],
                       to_native(error)),
                    exception=traceback.format_exc())

            return return_value

    def enable_volume_efficiency(self):
        """
        Enables Volume efficiency for a given volume by path.
        """

        if self.use_rest:
            api = 'private/cli/volume/efficiency/on'
            body = dict()
            query = {
                'path': self.parameters['path'],
                'vserver': self.parameters['vserver']
            }
            message, error = self.rest_api.patch(api, body, query)

            if error:
                self.module.fail_json(msg=error)
            elif message['num_records'] == 0:
                # A patch that matches no record means the path does not exist.
                error = 'Error enabling storage efficiency for path %s on vserver %s as the path provided does not exist.' % (
                    self.parameters['path'], self.parameters['vserver'])
                self.module.fail_json(msg=error)

        else:
            sis_enable = netapp_utils.zapi.NaElement("sis-enable")
            sis_enable.add_new_child("path", self.parameters['path'])

            try:
                self.server.invoke_successfully(sis_enable, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(
                    msg='Error enabling storage efficiency for path %s on vserver %s: %s'
                    % (self.parameters['path'], self.parameters['vserver'],
                       to_native(error)),
                    exception=traceback.format_exc())

    def disable_volume_efficiency(self):
        """
        Disables Volume efficiency for a given volume by path.
        """
        if self.use_rest:
            api = 'private/cli/volume/efficiency/off'
            body = dict()
            query = {
                'path': self.parameters['path'],
                'vserver': self.parameters['vserver']
            }
            dummy, error = self.rest_api.patch(api, body, query)
            if error:
                self.module.fail_json(msg=error)

        else:

            sis_disable = netapp_utils.zapi.NaElement("sis-disable")
            sis_disable.add_new_child("path", self.parameters['path'])

            try:
                self.server.invoke_successfully(sis_disable, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(
                    msg='Error disabling storage efficiency for path %s: %s' %
                    (self.parameters['path'], to_native(error)),
                    exception=traceback.format_exc())

    def modify_volume_efficiency(self):
        """
        Modifies volume efficiency settings for a given volume by path.

        Sends every user-supplied option; ONTAP ignores values that already
        match the current configuration.
        """

        if self.use_rest:
            api = 'private/cli/volume/efficiency'
            body = dict()
            query = {
                'path': self.parameters['path'],
                'vserver': self.parameters['vserver']
            }

            # Map module option names to REST CLI field names.
            rest_fields = {
                'schedule': 'schedule',
                'policy': 'policy',
                'enable_compression': 'compression',
                'enable_inline_compression': 'inline_compression',
                'enable_inline_dedupe': 'inline_dedupe',
                'enable_data_compaction': 'data_compaction',
                'enable_cross_volume_inline_dedupe': 'cross_volume_inline_dedupe',
                'enable_cross_volume_background_dedupe': 'cross_volume_background_dedupe',
            }
            for option, field in rest_fields.items():
                if option in self.parameters:
                    body[field] = self.parameters[option]

            dummy, error = self.rest_api.patch(api, body, query)
            if error:
                self.module.fail_json(msg=error)

        else:

            sis_config_obj = netapp_utils.zapi.NaElement("sis-set-config")
            sis_config_obj.add_new_child('path', self.parameters['path'])
            if 'schedule' in self.parameters:
                sis_config_obj.add_new_child('schedule',
                                             self.parameters['schedule'])
            if 'policy' in self.parameters:
                sis_config_obj.add_new_child('policy-name',
                                             self.parameters['policy'])
            # Map module boolean options to ZAPI element names; ZAPI wants
            # string booleans, hence get_value_for_bool(False, ...).
            zapi_bool_fields = {
                'enable_compression': 'enable-compression',
                'enable_inline_compression': 'enable-inline-compression',
                'enable_inline_dedupe': 'enable-inline-dedupe',
                'enable_data_compaction': 'enable-data-compaction',
                'enable_cross_volume_inline_dedupe': 'enable-cross-volume-inline-dedupe',
                'enable_cross_volume_background_dedupe': 'enable-cross-volume-background-dedupe',
            }
            for option, field in zapi_bool_fields.items():
                if option in self.parameters:
                    sis_config_obj.add_new_child(
                        field,
                        self.na_helper.get_value_for_bool(
                            False, self.parameters[option]))

            try:
                self.server.invoke_successfully(sis_config_obj, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(
                    msg='Error modifying storage efficiency for path %s: %s' %
                    (self.parameters['path'], to_native(error)),
                    exception=traceback.format_exc())

    def apply(self):
        """
        Compare current and desired efficiency settings and apply changes.
        """
        if not self.use_rest:
            netapp_utils.ems_log_event("na_ontap_volume_efficiency",
                                       self.server)

        current = self.get_volume_efficiency()

        # If the volume efficiency does not exist for a given path to create this current is set to disabled
        # this is for ONTAP systems that do not enable efficiency by default.
        if current is None:
            current = {'enabled': 'disabled'}

        modify = self.na_helper.get_modified_attributes(
            current, self.parameters)

        if self.na_helper.changed:
            if not self.module.check_mode:
                if self.parameters['state'] == 'present' and current[
                        'enabled'] == 'disabled':
                    self.enable_volume_efficiency()
                elif self.parameters['state'] == 'absent' and current[
                        'enabled'] == 'enabled':
                    self.disable_volume_efficiency()

                if 'enabled' in modify:
                    del modify['enabled']
                # Removed the enabled key if there is anything remaining in the modify dict we need to modify.
                if modify:
                    self.modify_volume_efficiency()

        self.module.exit_json(changed=self.na_helper.changed)
Exemplo n.º 4
0
class NetAppONTAPCifsShare(object):
    """
    Methods to create/delete/modify(path) CIFS share
    """
    def __init__(self):
        """Build the argument spec, parse parameters, and open a ZAPI connection."""
        spec = netapp_utils.na_ontap_host_argument_spec()
        spec.update(dict(
            state=dict(required=False,
                       type='str',
                       choices=['present', 'absent'],
                       default='present'),
            share_name=dict(required=True, type='str'),
            path=dict(required=False, type='str'),
            vserver=dict(required=True, type='str'),
            share_properties=dict(required=False,
                                  type='list',
                                  elements='str'),
            symlink_properties=dict(required=False,
                                    type='list',
                                    elements='str'),
            vscan_fileop_profile=dict(required=False,
                                      type='str',
                                      choices=['no_scan', 'standard',
                                               'strict', 'writes_only']),
        ))
        self.argument_spec = spec

        self.module = AnsibleModule(argument_spec=self.argument_spec,
                                    supports_check_mode=True)

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        # ZAPI-only module: bail out early if the client library is missing.
        if HAS_NETAPP_LIB is False:
            self.module.fail_json(
                msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(
                module=self.module, vserver=self.parameters.get('vserver'))

    def get_cifs_share(self):
        """
        Return details about the cifs-share
        :param:
            name : Name of the cifs-share
        :return: Details about the cifs-share. None if not found.
        :rtype: dict
        """
        cifs_iter = netapp_utils.zapi.NaElement('cifs-share-get-iter')
        cifs_info = netapp_utils.zapi.NaElement('cifs-share')
        cifs_info.add_new_child('share-name',
                                self.parameters.get('share_name'))
        cifs_info.add_new_child('vserver', self.parameters.get('vserver'))

        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(cifs_info)

        cifs_iter.add_child_elem(query)

        result = self.server.invoke_successfully(cifs_iter, True)

        return_value = None
        # check if query returns the expected cifs-share
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) == 1:
            properties_list = []
            symlink_list = []
            cifs_attrs = result.get_child_by_name('attributes-list').\
                get_child_by_name('cifs-share')
            if cifs_attrs.get_child_by_name('share-properties'):
                properties_attrs = cifs_attrs['share-properties']
                if properties_attrs is not None:
                    # renamed loop variable: do not shadow the builtin 'property'
                    properties_list = [
                        prop.get_content()
                        for prop in properties_attrs.get_children()
                    ]
            if cifs_attrs.get_child_by_name('symlink-properties'):
                symlink_attrs = cifs_attrs['symlink-properties']
                if symlink_attrs is not None:
                    symlink_list = [
                        symlink.get_content()
                        for symlink in symlink_attrs.get_children()
                    ]
            return_value = {
                'share': cifs_attrs.get_child_content('share-name'),
                'path': cifs_attrs.get_child_content('path'),
                'share_properties': properties_list,
                'symlink_properties': symlink_list
            }
            # vscan profile is optional on older ONTAP; only report when present
            if cifs_attrs.get_child_by_name('vscan-fileop-profile'):
                return_value['vscan_fileop_profile'] = cifs_attrs[
                    'vscan-fileop-profile']

        return return_value

    def create_cifs_share(self):
        """
        Create CIFS share

        Sends cifs-share-create with the configured path, and attaches
        optional share/symlink properties and the vscan file-op profile.
        """
        options = {
            'share-name': self.parameters.get('share_name'),
            'path': self.parameters.get('path')
        }
        cifs_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'cifs-share-create', **options)
        if self.parameters.get('share_properties'):
            property_attrs = netapp_utils.zapi.NaElement('share-properties')
            cifs_create.add_child_elem(property_attrs)
            # renamed loop variable: do not shadow the builtin 'property'
            for prop in self.parameters.get('share_properties'):
                property_attrs.add_new_child('cifs-share-properties', prop)
        if self.parameters.get('symlink_properties'):
            symlink_attrs = netapp_utils.zapi.NaElement('symlink-properties')
            cifs_create.add_child_elem(symlink_attrs)
            for symlink in self.parameters.get('symlink_properties'):
                symlink_attrs.add_new_child('cifs-share-symlink-properties',
                                            symlink)
        if self.parameters.get('vscan_fileop_profile'):
            fileop_attrs = netapp_utils.zapi.NaElement('vscan-fileop-profile')
            fileop_attrs.set_content(self.parameters['vscan_fileop_profile'])
            cifs_create.add_child_elem(fileop_attrs)

        try:
            self.server.invoke_successfully(cifs_create, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:

            self.module.fail_json(
                msg='Error creating cifs-share %s: %s' %
                (self.parameters.get('share_name'), to_native(error)),
                exception=traceback.format_exc())

    def delete_cifs_share(self):
        """Delete the CIFS share identified by the 'share_name' parameter."""
        options = {'share-name': self.parameters.get('share_name')}
        cifs_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'cifs-share-delete', **options)

        try:
            self.server.invoke_successfully(cifs_delete, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(
                msg='Error deleting cifs-share %s: %s' %
                (self.parameters.get('share_name'), to_native(error)),
                exception=traceback.format_exc())

    def modify_cifs_share(self):
        """
        Modify path, properties, symlink properties and/or vscan file-op
        profile for the given CIFS share.
        """
        options = {'share-name': self.parameters.get('share_name')}
        cifs_modify = netapp_utils.zapi.NaElement.create_node_with_children(
            'cifs-share-modify', **options)
        if self.parameters.get('path'):
            cifs_modify.add_new_child('path', self.parameters.get('path'))
        if self.parameters.get('share_properties'):
            property_attrs = netapp_utils.zapi.NaElement('share-properties')
            cifs_modify.add_child_elem(property_attrs)
            # renamed loop variable: do not shadow the builtin 'property'
            for prop in self.parameters.get('share_properties'):
                property_attrs.add_new_child('cifs-share-properties', prop)
        if self.parameters.get('symlink_properties'):
            symlink_attrs = netapp_utils.zapi.NaElement('symlink-properties')
            cifs_modify.add_child_elem(symlink_attrs)
            # was confusingly named 'property' in the original
            for symlink in self.parameters.get('symlink_properties'):
                symlink_attrs.add_new_child('cifs-share-symlink-properties',
                                            symlink)
        if self.parameters.get('vscan_fileop_profile'):
            fileop_attrs = netapp_utils.zapi.NaElement('vscan-fileop-profile')
            fileop_attrs.set_content(self.parameters['vscan_fileop_profile'])
            cifs_modify.add_child_elem(fileop_attrs)
        try:
            self.server.invoke_successfully(cifs_modify, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(
                msg='Error modifying cifs-share %s:%s' %
                (self.parameters.get('share_name'), to_native(error)),
                exception=traceback.format_exc())

    def apply(self):
        '''Create, delete or modify the CIFS share to match self.parameters.

        Logs an EMS event, compares current state with the desired state,
        and performs the required action unless running in check mode.
        '''
        netapp_utils.ems_log_event("na_ontap_cifs", self.server)
        current = self.get_cifs_share()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        # initialize so 'modify' is always bound when tested below
        modify = None
        if cd_action is None:
            modify = self.na_helper.get_modified_attributes(
                current, self.parameters)
        # skip all actions in check mode; only report the would-be change
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_cifs_share()
            elif cd_action == 'delete':
                self.delete_cifs_share()
            elif modify:
                self.modify_cifs_share()
        self.module.exit_json(changed=self.na_helper.changed)