Exemple #1
0
    def create_destination_flexvol(self, src_backend_name, dest_backend_name,
                                   src_flexvol_name, dest_flexvol_name):
        """Create a SnapMirror mirror target FlexVol for a given source."""
        # Build API clients for both ends of the mirror.
        dest_config = config_utils.get_backend_configuration(
            dest_backend_name)
        dest_vserver = dest_config.netapp_vserver
        dest_client = config_utils.get_client_for_backend(
            dest_backend_name, vserver_name=dest_vserver)

        src_config = config_utils.get_backend_configuration(
            src_backend_name)
        src_vserver = src_config.netapp_vserver
        src_client = config_utils.get_client_for_backend(
            src_backend_name, vserver_name=src_vserver)

        # Mirror the source volume's provisioning options on the target.
        provisioning_options = (
            src_client.get_provisioning_options_from_flexvol(
                src_flexvol_name)
        )

        # If the source is encrypted then the destination needs to be
        # encrypted too. Using is_flexvol_encrypted because it includes
        # a simple check to ensure that the NVE feature is supported.
        if src_client.is_flexvol_encrypted(src_flexvol_name, src_vserver):
            provisioning_options['encrypt'] = 'true'

        # The size is passed explicitly to create_flexvol, and volume_type
        # is overridden below, so both are stripped from the options.
        size = provisioning_options.pop('size', None)
        if not size:
            msg = _("Unable to read the size of the source FlexVol (%s) "
                    "to create a SnapMirror destination.")
            raise na_utils.NetAppDriverException(msg % src_flexvol_name)
        provisioning_options.pop('volume_type', None)

        source_aggregate = provisioning_options.pop('aggregate')
        aggregate_map = self._get_replication_aggregate_map(
            src_backend_name, dest_backend_name)

        destination_aggregate = aggregate_map.get(source_aggregate)
        if not destination_aggregate:
            msg = _("Unable to find configuration matching the source "
                    "aggregate (%s) and the destination aggregate. Option "
                    "netapp_replication_aggregate_map may be incorrect.")
            raise na_utils.NetAppDriverException(
                message=msg % source_aggregate)

        # NOTE(gouthamr): The volume is intentionally created as a Data
        # Protection volume; junction-path will be added on breaking
        # the mirror.
        provisioning_options['volume_type'] = 'dp'
        dest_client.create_flexvol(dest_flexvol_name, destination_aggregate,
                                   size, **provisioning_options)
Exemple #2
0
    def create_group_snapshot(self, context, group_snapshot, snapshots):
        """Creates a Cinder group snapshot object.

        When the group is a consistency-group snapshot type, an ONTAP
        cg-snapshot is taken across the flexvols backing the group so that
        all member volumes are captured with write-order consistency.
        Otherwise each member snapshot is produced independently by cloning
        the volume's backing file.

        :returns: An implicit update for the group snapshot and snapshot
                 models that is then used by the manager to set the models
                 to available.
        """
        try:
            if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
                # Plain generic group: snapshot each member on its own.
                for snap in snapshots:
                    self._clone_backing_file_for_volume(
                        snap['volume_name'], snap['name'],
                        snap['volume_id'], is_snapshot=True)
            else:
                self._create_consistent_group_snapshot(group_snapshot,
                                                       snapshots)
        except Exception as ex:
            err_msg = (_("Create group snapshot failed (%s).") % ex)
            LOG.exception(err_msg, resource=group_snapshot)
            raise na_utils.NetAppDriverException(err_msg)

        return None, None
Exemple #3
0
    def create_vserver_peer(self, src_vserver, src_backend_name, dest_vserver,
                            peer_applications):
        """Create a vserver peer relationship"""
        src_client = config_utils.get_client_for_backend(
            src_backend_name, vserver_name=src_vserver)

        existing_peers = src_client.get_vserver_peers(src_vserver,
                                                      dest_vserver)

        if existing_peers:
            # A relationship already exists: accept it only when it carries
            # every requested peering application.
            for peer in existing_peers:
                if all(app in peer['applications']
                       for app in peer_applications):
                    LOG.debug("Found vserver peer relationship between %s and %s.",
                              src_vserver, dest_vserver)
                    return None

            msg = _("Vserver peer relationship found between %(src)s and %(dest)s "
                    "but peering application %(app)s isn't defined.")
            raise na_utils.NetAppDriverException(msg % {
                'src': src_vserver,
                'dest': dest_vserver,
                'app': peer_applications
            })

        # No relationship yet: create one with the requested applications.
        src_client.create_vserver_peer(
            src_vserver,
            dest_vserver,
            vserver_peer_application=peer_applications)
        LOG.debug(
            "Vserver peer relationship created between %(src)s "
            "and %(dest)s. Peering application set to %(app)s.", {
                'src': src_vserver,
                'dest': dest_vserver,
                'app': peer_applications
            })
        return None
Exemple #4
0
 def wait_for_quiesced():
     """Raise unless the SnapMirror relationship has reached 'quiesced'."""
     # Only ask the backend for the attributes needed for this decision.
     mirror = dest_client.get_snapmirrors(
         src_vserver, src_flexvol_name, dest_vserver,
         dest_flexvol_name,
         desired_attributes=['relationship-status', 'mirror-state'])[0]
     status = mirror.get('relationship-status')
     if status != 'quiesced':
         msg = _("SnapMirror relationship is not quiesced.")
         raise na_utils.NetAppDriverException(reason=msg)
Exemple #5
0
 def check_for_setup_error(self):
     """Check that the driver is working and can communicate."""
     # Without at least one usable pool the backend cannot provision.
     pool_map = self._get_flexvol_to_pool_map()
     if not pool_map:
         msg = _('No pools are available for provisioning volumes. '
                 'Ensure that the configuration option '
                 'netapp_pool_name_search_pattern is set correctly.')
         raise na_utils.NetAppDriverException(msg)

     self._add_looping_tasks()
     super(NetAppBlockStorageCmodeLibrary, self).check_for_setup_error()
Exemple #6
0
 def _wait_lun_copy_complete():
     """Poll the LUN copy job; stop the looping call once it completes.

     :raises NetAppDriverException: if the job no longer exists on the
         backend or was destroyed before completing.
     """
     copy_status = self.zapi_client.get_lun_copy_status(job_uuid)
     # Guard BEFORE reading copy_status['job-status']: the original logged
     # first and would crash on an empty/None status instead of raising
     # the intended error. Also fixed the broken '%' conversion in the
     # message ('% doesn't' -> '%s doesn't'), which made the format raise
     # TypeError on a UUID string.
     if not copy_status:
         status_error_msg = (_("Error copying LUN %s. The "
                               "corresponding Job UUID %s doesn't "
                               "exist."))
         raise na_utils.NetAppDriverException(status_error_msg %
                                              (volume.id, job_uuid))
     LOG.debug(
         'Waiting for LUN copy job %s to complete. Current '
         'status is: %s.', job_uuid, copy_status['job-status'])
     if copy_status['job-status'] == 'destroyed':
         # Backend destroyed the job; surface its last failure reason.
         status_error_msg = (_('Error copying LUN %s. %s.'))
         raise na_utils.NetAppDriverException(
             status_error_msg %
             (volume.id, copy_status['last-failure-reason']))
     elif copy_status['job-status'] == 'complete':
         raise loopingcall.LoopingCallDone()
Exemple #7
0
    def check_for_setup_error(self):
        """Check that the driver is working and can communicate.

        Discovers the LUNs on the NetApp server.
        """
        # Validate configuration values before touching the backend.
        if self.lun_ostype not in self.ALLOWED_LUN_OS_TYPES:
            bad_ostype_msg = _("Invalid value for NetApp configuration"
                               " option netapp_lun_ostype.")
            LOG.error(bad_ostype_msg)
            raise na_utils.NetAppDriverException(bad_ostype_msg)

        if self.host_type not in self.ALLOWED_IGROUP_HOST_TYPES:
            bad_host_msg = _("Invalid value for NetApp configuration"
                             " option netapp_host_type.")
            LOG.error(bad_host_msg)
            raise na_utils.NetAppDriverException(bad_host_msg)

        # Populate the LUN table from the backend inventory.
        self._extract_and_populate_luns(self.zapi_client.get_lun_list())
        LOG.debug("Success getting list of LUNs from server.")
        self.loopingcalls.start_tasks()
Exemple #8
0
    def check_for_setup_error(self):
        """Check that the driver is working and can communicate."""
        self._add_looping_tasks()

        # FlexGroup pools need backend support (introduced in ONTAP 9.8).
        uses_flexgroup = self.ssc_library.contains_flexgroup_pool()
        if uses_flexgroup and not self.zapi_client.features.FLEXGROUP:
            msg = _('FlexGroup pool requires Data ONTAP 9.8 or later.')
            raise na_utils.NetAppDriverException(msg)

        super(NetAppCmodeNfsDriver, self).check_for_setup_error()
Exemple #9
0
    def create_group_snapshot(self, context, group_snapshot, snapshots):
        """Creates a Cinder group snapshot object.

        For a consistency-group snapshot type, an ONTAP cg-snapshot is taken
        across the flexvols backing the group, giving write-order consistency
        for all member volumes; this path is rejected when any member lives
        on a FlexGroup pool, since ONTAP FlexGroup does not support
        consistency group snapshots. For a generic group, each member is
        snapshotted individually.

        :returns: An implicit update for the group snapshot and snapshot
                 models that is then used by the manager to set the models
                 to available.
        """
        try:
            if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
                for snapshot in snapshots:
                    self.create_snapshot(snapshot)
            else:
                # NOTE(felipe_rodrigues): ONTAP FlexGroup does not support
                # consistency group snapshot, so all members must be inside
                # a FlexVol pool.
                for snapshot in snapshots:
                    if self._is_flexgroup(host=snapshot['volume']['host']):
                        msg = _("Cannot create consistency group snapshot with"
                                " volumes on a FlexGroup pool.")
                        raise na_utils.NetAppDriverException(msg)

                self._create_consistent_group_snapshot(group_snapshot,
                                                       snapshots)
        except Exception as ex:
            err_msg = (_("Create group snapshot failed (%s).") % ex)
            LOG.exception(err_msg, resource=group_snapshot)
            raise na_utils.NetAppDriverException(err_msg)

        return None, None
Exemple #10
0
    def create_group(self, context, group):
        """Driver entry point for creating a generic volume group.

        ONTAP does not maintain an actual group construct, so no
        communication with the backend is needed; the only validation is
        that a consistency group is not being placed on a FlexGroup pool.

        :returns: Hard-coded model update for generic volume group model.
        """
        on_flexgroup = self._is_flexgroup(host=group['host'])
        if on_flexgroup and volume_utils.is_group_a_cg_snapshot_type(group):
            msg = _("Cannot create %s consistency group on FlexGroup pool.")
            raise na_utils.NetAppDriverException(msg % group['id'])

        return {'status': fields.GroupStatus.AVAILABLE}
Exemple #11
0
    def _complete_failover(self,
                           source_backend_name,
                           replication_targets,
                           flexvols,
                           volumes,
                           failover_target=None):
        """Failover a backend to a secondary replication target."""
        target = failover_target or self._choose_failover_target(
            source_backend_name, flexvols, replication_targets)

        if target is None:
            msg = _("No suitable host was found to failover.")
            raise na_utils.NetAppDriverException(msg)

        src_config = config_utils.get_backend_configuration(
            source_backend_name)

        # 1. Attempt one last transfer before quiescing and breaking.
        self.update_snapmirrors(src_config, source_backend_name, flexvols)

        # 2. Break the SnapMirror relationships; collect pools that failed.
        failed_to_break = self.break_snapmirrors(src_config,
                                                 source_backend_name,
                                                 flexvols, target)

        # 3. Build replication-status updates for every volume on this host.
        volume_updates = []
        for volume in volumes:
            pool = volume_utils.extract_host(volume['host'], level='pool')
            if pool in failed_to_break:
                status = 'error'
            else:
                status = fields.ReplicationStatus.FAILED_OVER

            volume_updates.append({
                'volume_id': volume['id'],
                'updates': {
                    'replication_status': status,
                },
            })

        return target, volume_updates
Exemple #12
0
    def _delete_lun(self, lun_name):
        """Helper method to delete LUN backing a volume or snapshot."""
        metadata = self._get_lun_attr(lun_name, 'metadata')
        if not metadata:
            # Nothing tracked for this name; nothing to destroy.
            LOG.warning("No entry in LUN table for volume/snapshot"
                        " %(name)s.", {'name': lun_name})
            return

        try:
            self.zapi_client.destroy_lun(metadata['Path'])
        except netapp_api.NaApiError as e:
            if e.code != netapp_api.EOBJECTNOTFOUND:
                error_message = (_('A NetApp Api Error occurred: %s') % e)
                raise na_utils.NetAppDriverException(error_message)
            # Already gone on the backend: log and fall through so the
            # stale table entry is still removed.
            LOG.warning("Failure deleting LUN %(name)s. %(message)s",
                        {'name': lun_name, 'message': e})
        self.lun_table.pop(lun_name)
Exemple #13
0
    def update_group(self,
                     context,
                     group,
                     add_volumes=None,
                     remove_volumes=None):
        """Driver entry point for updating a generic volume group.

        Since no actual group construct is ever created in ONTAP, no backend
        metadata needs updating; the call is a NO-OP apart from rejecting
        FlexGroup-backed volumes being added to a consistency group.
        """
        if not volume_utils.is_group_a_cg_snapshot_type(group):
            return None, None, None

        # Consistency groups cannot contain FlexGroup-backed volumes.
        for vol in add_volumes:
            if self._is_flexgroup(host=vol['host']):
                msg = _("Cannot add volume from FlexGroup pool to "
                        "consistency group.")
                raise na_utils.NetAppDriverException(msg)

        return None, None, None
Exemple #14
0
    def _migrate_volume_to_pool(self, volume, src_pool, dest_pool, vserver,
                                dest_backend_name):
        """Migrate volume to another Cinder Pool within the same vserver.

        :param volume: Cinder volume object to migrate.
        :param src_pool: name of the source pool (FlexVol).
        :param dest_pool: name of the destination pool (FlexVol).
        :param vserver: vserver containing both pools.
        :param dest_backend_name: name of the destination backend.
        :returns: dict of model updates for the volume; sets status to
            maintenance when the backend migration timed out.
        :raises NetAppDriverException: if the LUN move fails.
        """
        LOG.info(
            'Migrating volume %(vol)s from pool %(src)s to '
            '%(dest)s within vserver %(vserver)s.', {
                'vol': volume.id,
                'src': src_pool,
                'dest': dest_pool,
                'vserver': vserver
            })
        updates = {}
        try:
            self._move_lun(volume, src_pool, dest_pool)
        except na_utils.NetAppDriverTimeout:
            # NOTE: added the missing space between sentences; the original
            # message read 'migration.Volume status'.
            error_msg = (_('Timeout waiting volume %s to complete migration. '
                           'Volume status is set to maintenance to prevent '
                           'performing operations with this volume. Check the '
                           'migration status on the storage side and set '
                           'volume status manually if migration succeeded.'))
            LOG.warning(error_msg, volume.id)
            # Flag the volume so users do not act on a half-migrated LUN.
            updates['status'] = fields.VolumeStatus.MAINTENANCE
        except na_utils.NetAppDriverException as e:
            error_msg = (_('Failed to migrate volume %(vol)s from pool '
                           '%(src)s to %(dest)s. %(err)s'))
            raise na_utils.NetAppDriverException(error_msg % {
                'vol': volume.id,
                'src': src_pool,
                'dest': dest_pool,
                'err': e
            })

        self._finish_migrate_volume_to_pool(volume, dest_pool)
        LOG.info(
            'Successfully migrated volume %(vol)s from pool %(src)s '
            'to %(dest)s within vserver %(vserver)s.', {
                'vol': volume.id,
                'src': src_pool,
                'dest': dest_pool,
                'vserver': vserver
            })
        return updates
Exemple #15
0
    def create_group_from_src(self,
                              context,
                              group,
                              volumes,
                              group_snapshot=None,
                              sorted_snapshots=None,
                              source_group=None,
                              sorted_source_vols=None):
        """Creates a group from a group snapshot or a group of cinder vols.

        Exactly one source is expected: either a group snapshot (with
        sorted_snapshots aligned to volumes) or a source group (with
        sorted_source_vols aligned to volumes).

        :returns: An implicit update for the volumes model that is
                 interpreted by the manager as a successful operation.
        """
        LOG.debug("VOLUMES %s ", ', '.join([vol['id'] for vol in volumes]))
        model_update = None
        volumes_model_update = []

        if group_snapshot:
            # Source is a group snapshot: create each new volume from its
            # corresponding (pre-sorted) member snapshot.
            vols = zip(volumes, sorted_snapshots)

            for volume, snapshot in vols:
                update = self.create_volume_from_snapshot(volume, snapshot)
                update['id'] = volume['id']
                volumes_model_update.append(update)

        elif source_group and sorted_source_vols:
            hosts = []
            for source_vol in sorted_source_vols:
                # NOTE(felipe_rodrigues): ONTAP FlexGroup does not support
                # consistency group snapshot, so if any source volume is on a
                # FlexGroup, the operation must be create from a not-cg,
                # falling back to the generic group support.
                if self._is_flexgroup(host=source_vol['host']):
                    if volume_utils.is_group_a_cg_snapshot_type(group):
                        msg = _("Cannot create consistency group with volume "
                                "on a FlexGroup pool.")
                        raise na_utils.NetAppDriverException(msg)
                    else:
                        # falls back to generic support
                        raise NotImplementedError()
                hosts.append(source_vol['host'])

            flexvols = self._get_flexvol_names_from_hosts(hosts)

            # Create snapshot for backing flexvol
            snapshot_name = 'snapshot-temp-' + source_group['id']
            self.zapi_client.create_cg_snapshot(flexvols, snapshot_name)

            # Start clone process for new volumes
            vols = zip(volumes, sorted_source_vols)
            for volume, source_vol in vols:
                # Clone each new volume's backing file out of the temporary
                # cg-snapshot so all clones share one consistency point.
                self._clone_backing_file_for_volume(
                    source_vol['name'],
                    volume['name'],
                    source_vol['id'],
                    source_snapshot=snapshot_name)
                volume_model_update = (self._get_volume_model_update(volume)
                                       or {})
                volume_model_update.update({
                    'id':
                    volume['id'],
                    'provider_location':
                    source_vol['provider_location'],
                })
                volumes_model_update.append(volume_model_update)

            # Delete backing flexvol snapshots once cloning is done; wait
            # for each snapshot to leave the busy state first.
            for flexvol_name in flexvols:
                self.zapi_client.wait_for_busy_snapshot(
                    flexvol_name, snapshot_name)
                self.zapi_client.delete_snapshot(flexvol_name, snapshot_name)
        else:
            # Neither a snapshot source nor a volume source was supplied.
            LOG.error("Unexpected set of parameters received when "
                      "creating group from source.")
            model_update = {'status': fields.GroupStatus.ERROR}

        return model_update, volumes_model_update
Exemple #16
0
    def create_destination_flexvol(self,
                                   src_backend_name,
                                   dest_backend_name,
                                   src_flexvol_name,
                                   dest_flexvol_name,
                                   pool_is_flexgroup=False):
        """Create a SnapMirror mirror target FlexVol for a given source.

        :param src_backend_name: name of the source backend.
        :param dest_backend_name: name of the destination backend.
        :param src_flexvol_name: name of the source volume.
        :param dest_flexvol_name: name for the destination volume.
        :param pool_is_flexgroup: when True, create the destination
            asynchronously as a FlexGroup over all mapped aggregates.
        :raises NetAppDriverException: if the source size cannot be read,
            an aggregate mapping is missing, or the FlexGroup does not come
            online within the timeout.
        """
        dest_backend_config = config_utils.get_backend_configuration(
            dest_backend_name)
        dest_vserver = dest_backend_config.netapp_vserver
        dest_client = config_utils.get_client_for_backend(
            dest_backend_name, vserver_name=dest_vserver)

        source_backend_config = config_utils.get_backend_configuration(
            src_backend_name)
        src_vserver = source_backend_config.netapp_vserver
        src_client = config_utils.get_client_for_backend(
            src_backend_name, vserver_name=src_vserver)

        provisioning_options = (
            src_client.get_provisioning_options_from_flexvol(src_flexvol_name))
        # 'is_flexgroup' is informational and not a valid provisioning
        # option for volume creation.
        provisioning_options.pop('is_flexgroup')

        # If the source is encrypted then the destination needs to be
        # encrypted too. Using is_flexvol_encrypted because it includes
        # a simple check to ensure that the NVE feature is supported.
        if src_client.is_flexvol_encrypted(src_flexvol_name, src_vserver):
            provisioning_options['encrypt'] = 'true'

        # Remove size and volume_type
        size = provisioning_options.pop('size', None)
        if not size:
            msg = _("Unable to read the size of the source FlexVol (%s) "
                    "to create a SnapMirror destination.")
            raise na_utils.NetAppDriverException(msg % src_flexvol_name)
        provisioning_options.pop('volume_type', None)

        source_aggregate = provisioning_options.pop('aggregate')
        aggregate_map = self._get_replication_aggregate_map(
            src_backend_name, dest_backend_name)

        # Map every source aggregate to its configured destination; a
        # FlexGroup source may span several aggregates.
        destination_aggregate = []
        for src_aggr in source_aggregate:
            dst_aggr = aggregate_map.get(src_aggr, None)
            if dst_aggr:
                destination_aggregate.append(dst_aggr)
            else:
                msg = _("Unable to find configuration matching the source "
                        "aggregate and the destination aggregate. Option "
                        "netapp_replication_aggregate_map may be incorrect.")
                raise na_utils.NetAppDriverException(message=msg)

        # NOTE(gouthamr): The volume is intentionally created as a Data
        # Protection volume; junction-path will be added on breaking
        # the mirror.
        provisioning_options['volume_type'] = 'dp'

        if pool_is_flexgroup:
            compression_enabled = provisioning_options.pop(
                'compression_enabled', False)
            # cDOT compression requires that deduplication be enabled.
            dedupe_enabled = provisioning_options.pop(
                'dedupe_enabled', False) or compression_enabled

            dest_client.create_volume_async(dest_flexvol_name,
                                            destination_aggregate, size,
                                            **provisioning_options)

            timeout = self._get_replication_volume_online_timeout()

            def _wait_volume_is_online():
                # Stop polling once the new FlexGroup reports 'online'.
                volume_state = dest_client.get_volume_state(
                    flexvol_name=dest_flexvol_name)
                if volume_state and volume_state == 'online':
                    raise loopingcall.LoopingCallDone()

            try:
                wait_call = loopingcall.FixedIntervalWithTimeoutLoopingCall(
                    _wait_volume_is_online)
                wait_call.start(interval=5, timeout=timeout).wait()

                # Efficiency features can only be enabled once the volume
                # is online.
                if dedupe_enabled:
                    dest_client.enable_volume_dedupe_async(dest_flexvol_name)
                if compression_enabled:
                    dest_client.enable_volume_compression_async(
                        dest_flexvol_name)

            except loopingcall.LoopingCallTimeOut:
                # NOTE: fixed duplicated word in the original message
                # ('to to come online').
                msg = _("Timeout waiting destination FlexGroup to come "
                        "online.")
                raise na_utils.NetAppDriverException(msg)

        else:
            dest_client.create_flexvol(dest_flexvol_name,
                                       destination_aggregate[0], size,
                                       **provisioning_options)