Example #1
 def wait_for_lease_ready(self, lease):
     done = event.Event()
     loop = loopingcall.FixedIntervalLoopingCall(self._poll_lease, lease,
                                                 done)
     loop.start(self._task_poll_interval)
     done.wait()
     loop.stop()
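A note on Example #1: the looping call runs the poller in a green thread,
and the caller blocks on the eventlet Event rather than on the loop
itself. As a hedged sketch (the lease attribute and state names are
assumptions, not the real implementation), the _poll_lease callback could
look like:

     def _poll_lease(self, lease, done):
         state = lease.state  # assumed attribute on the lease object
         if state == 'ready':
             done.send()  # unblocks done.wait() in the caller
             raise loopingcall.LoopingCallDone()
         elif state == 'error':
             # Propagate the failure to the waiting caller.
             done.send_exception(RuntimeError('lease entered error state'))
             raise loopingcall.LoopingCallDone()
         # Any other state: return and poll again on the next interval.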
Example #2
    def comm_pairevtwait(self, pvol, svol, is_vvol, status, timeout, interval):
        loop = loopingcall.FixedIntervalLoopingCall(self._wait_for_pair_status,
                                                    pvol, svol,
                                                    is_vvol, status, timeout,
                                                    time.time())

        loop.start(interval=interval).wait()
Example #3
    def wait_for_sync(self, conn, syncName):
        """Given the sync name wait for it to fully synchronize.

        :param conn: connection to the ecom server
        :param syncName: the syncName
        """

        def _wait_for_sync():
            """Called at an interval until the synchronization is finished."""
            retries = kwargs['retries']
            wait_for_sync_called = kwargs['wait_for_sync_called']
            if self._is_sync_complete(conn, syncName):
                raise loopingcall.LoopingCallDone()
            if retries > JOB_RETRIES:
                LOG.error(_LE("_wait_for_sync failed after %(retries)d "
                              "tries."),
                          {'retries': retries})
                raise loopingcall.LoopingCallDone()
            try:
                kwargs['retries'] = retries + 1
                if not wait_for_sync_called:
                    if self._is_sync_complete(conn, syncName):
                        kwargs['wait_for_sync_called'] = True
            except Exception as e:
                LOG.error(_LE("Exception: %s") % six.text_type(e))
                exceptionMessage = (_("Issue encountered waiting for "
                                      "synchronization."))
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(exceptionMessage)

        kwargs = {'retries': 0,
                  'wait_for_sync_called': False}
        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_sync)
        timer.start(interval=INTERVAL_10_SEC).wait()
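The kwargs dict in Example #3 is mutable closure state: in Python 2 an
inner function cannot rebind a name from its enclosing scope (there is no
nonlocal), so the retry counter lives in a dict that both scopes share.
The same trick in isolation, as a minimal sketch:

    def make_poller():
        state = {'tries': 0}  # shared, mutable closure state

        def _poll():
            state['tries'] += 1  # mutates the dict, no rebinding needed
            if state['tries'] >= 3:
                raise loopingcall.LoopingCallDone(retvalue=state['tries'])
        return _poll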
Example #4
    def _wait_for_a_condition(self, testmethod, timeout=None,
                              interval=INTERVAL_1_SEC):
        start_time = time.time()
        if timeout is None:
            timeout = DEFAULT_TIMEOUT

        def _inner():
            try:
                testValue = testmethod()
            except Exception as ex:
                testValue = False
                LOG.debug('Helper.'
                      '_wait_for_a_condition: %(method_name)s '
                          'execution failed for %(exception)s',
                          {'method_name': testmethod.__name__,
                           'exception': six.text_type(ex)})
            if testValue:
                raise loopingcall.LoopingCallDone()

            if int(time.time()) - start_time > timeout:
                msg = (_('CommandLineHelper._wait_for_a_condition: %s timeout')
                       % testmethod.__name__)
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

        timer = loopingcall.FixedIntervalLoopingCall(_inner)
        timer.start(interval=interval).wait()
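Callers hand _wait_for_a_condition a zero-argument callable that returns
truthy once the condition holds (a lambda works; its __name__ simply logs
as '<lambda>'). A hypothetical usage sketch, where _get_vdisk_attributes
is an assumed helper used only for illustration:

    # Poll every INTERVAL_1_SEC until the vdisk exists, up to 300 seconds.
    self._wait_for_a_condition(
        lambda: self._get_vdisk_attributes(vdisk_name) is not None,
        timeout=300)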
Example #5
    def wait_for_volume_removal(self, volume_path):
        """This is used to ensure that volumes are gone."""
        def _wait_for_volume_removal(volume_path):
            LOG.debug("Waiting for SCSI mount point %s to be removed.",
                      volume_path)
            if os.path.exists(volume_path):
                if self.tries >= self.scan_attempts:
                    msg = _LE("Exceeded the number of attempts to detect "
                              "volume removal.")
                    LOG.error(msg)
                    raise exception.VolumePathNotRemoved(
                        volume_path=volume_path)

                LOG.debug(
                    "%(path)s still exists, rescanning. Try number: "
                    "%(tries)s", {
                        'path': volume_path,
                        'tries': self.tries
                    })
                self.tries = self.tries + 1
            else:
                LOG.debug("SCSI mount point %s has been removed.", volume_path)
                raise loopingcall.LoopingCallDone()

        # Setup a loop here to give the kernel time
        # to remove the volume from /dev/disk/by-path/
        self.tries = 0
        self.scan_attempts = 3
        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_volume_removal,
                                                     volume_path)
        timer.start(interval=2).wait()
Example #6
    def _wait_for_targetstate(self, target_name):
        """Polls backend to verify an iscsi target configuration.

        This function will try to verify the creation of an iscsi
        target on both gateway nodes of the array every 5 seconds.

        Arguments:
            target_name -- name of iscsi target to be polled

        Returns:
            True if the export state was correctly added
        """
        bn = "/vshare/config/iscsi/target/%s" % (target_name)

        def _loop_func():
            status = [False, False]
            mg_conns = [self.common.mga, self.common.mgb]

            LOG.debug("Entering _wait_for_targetstate loop: target=%s.",
                      target_name)

            for node_id in xrange(2):
                resp = mg_conns[node_id].basic.get_node_values(bn)
                if len(resp.keys()):
                    status[node_id] = True

            if status[0] and status[1]:
                raise loopingcall.LoopingCallDone(retvalue=True)

        timer = loopingcall.FixedIntervalLoopingCall(_loop_func)
        success = timer.start(interval=5).wait()

        return success
Example #7
    def _wait_for_job_complete(self, conn, job):
        """Given the job wait for it to complete.

        :param conn: connection to the ecom server
        :param job: the job dict
        """
        def _wait_for_job_complete():
            """Called at an interval until the job is finished"""
            if self._is_job_finished(conn, job):
                raise loopingcall.LoopingCallDone()
            if self.retries > JOB_RETRIES:
                LOG.error(
                    _("_wait_for_job_complete failed after %(retries)d "
                      "tries") % {'retries': self.retries})

                raise loopingcall.LoopingCallDone()
            try:
                self.retries += 1
                if not self.wait_for_job_called:
                    if self._is_job_finished(conn, job):
                        self.wait_for_job_called = True
            except Exception as e:
                LOG.error(_("Exception: %s") % six.text_type(e))
                exceptionMessage = (_("Issue encountered waiting for job."))
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(exceptionMessage)

        self.retries = 0
        self.wait_for_job_called = False
        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_job_complete)
        timer.start(interval=INTERVAL_10_SEC).wait()
Example #8
    def exec_hsnm(self, command, args, printflag=True, noretry=False,
                  timeout=EXEC_TIMEOUT, interval=EXEC_INTERVAL):
        args = '%s %s %s' % (SNM2_ENV, command, args)

        loop = loopingcall.FixedIntervalLoopingCall(
            self._wait_for_exec_hsnm, args, printflag,
            noretry, timeout, time.time())

        return loop.start(interval=interval).wait()
Example #9
 def add_timer(self,
               interval,
               callback,
               initial_delay=None,
               *args,
               **kwargs):
     pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
     pulse.start(interval=interval, initial_delay=initial_delay)
     self.timers.append(pulse)
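The timers list in Example #9 exists so the loops can be torn down later;
a matching shutdown method (an assumption about the surrounding class, in
the spirit of the service in Example #11) might look like:

     def stop_timers(self):
         # Stop every looping call registered via add_timer.
         for pulse in self.timers:
             pulse.stop()
         self.timers = []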
Example #10
    def backup(self, backup, volume_file, backup_metadata=True):
        """Backup the given volume to Swift."""
        (object_meta, container,
         volume_size_bytes) = self._prepare_backup(backup)
        counter = 0
        total_block_sent_num = 0

        # There are two mechanisms to send the progress notification.
        # 1. The notifications are periodically sent in a certain interval.
        # 2. The notifications are sent after a certain number of chunks.
        # Both of them are working simultaneously during the volume backup,
        # when swift is taken as the backup backend.
        def _notify_progress():
            self._send_progress_notification(self.context, backup, object_meta,
                                             total_block_sent_num,
                                             volume_size_bytes)

        timer = loopingcall.FixedIntervalLoopingCall(_notify_progress)
        if self.enable_progress_timer:
            timer.start(interval=self.backup_timer_interval)

        while True:
            data = volume_file.read(self.data_block_size_bytes)
            data_offset = volume_file.tell()
            if data == '':
                break
            self._backup_chunk(backup, container, data, data_offset,
                               object_meta)
            total_block_sent_num += self.data_block_num
            counter += 1
            if counter == self.data_block_num:
                # Send the notification to Ceilometer when the chunk
                # number reaches the data_block_num. The backup percentage
                # is put in the metadata as the extra information.
                self._send_progress_notification(self.context, backup,
                                                 object_meta,
                                                 total_block_sent_num,
                                                 volume_size_bytes)
                # reset the counter
                counter = 0

        # Stop the timer.
        timer.stop()
        # All the data have been sent, the backup_percent reaches 100.
        self._send_progress_end(self.context, backup, object_meta)

        if backup_metadata:
            try:
                self._backup_metadata(backup, object_meta)
            except Exception as err:
                with excutils.save_and_reraise_exception():
                    LOG.exception(
                        _LE("Backup volume metadata to swift failed: %s") %
                        six.text_type(err))
                    self.delete(backup)

        self._finalize_backup(backup, container, object_meta)
Example #11
    def start(self):
        version_string = version.version_string()
        LOG.info(_LI('Starting %(topic)s node (version %(version_string)s)'),
                 {'topic': self.topic, 'version_string': version_string})
        self.model_disconnected = False
        self.manager.init_host()
        ctxt = context.get_admin_context()
        try:
            service_ref = db.service_get_by_args(ctxt,
                                                 self.host,
                                                 self.binary)
            self.service_id = service_ref['id']
        except exception.NotFound:
            self._create_service_ref(ctxt)

        LOG.debug("Creating RPC server for service %s", self.topic)

        target = messaging.Target(topic=self.topic, server=self.host)
        endpoints = [self.manager]
        endpoints.extend(self.manager.additional_endpoints)
        serializer = objects_base.CinderObjectSerializer()
        self.rpcserver = rpc.get_server(target, endpoints, serializer)
        self.rpcserver.start()

        self.manager.init_host_with_rpc()

        if self.report_interval:
            pulse = loopingcall.FixedIntervalLoopingCall(
                self.report_state)
            pulse.start(interval=self.report_interval,
                        initial_delay=self.report_interval)
            self.timers.append(pulse)

        if self.periodic_interval:
            if self.periodic_fuzzy_delay:
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None

            periodic = loopingcall.FixedIntervalLoopingCall(
                self.periodic_tasks)
            periodic.start(interval=self.periodic_interval,
                           initial_delay=initial_delay)
            self.timers.append(periodic)
Example #12
    def create_volume(self, volume):
        """Creates a EMC volume."""

        LOG.debug(_('Entering create_volume.'))
        volumesize = volume['size']
        volumename = volume['name']

        LOG.info(
            _('Create Volume: %(volume)s  Size: %(size)s') % {
                'volume': volumename,
                'size': volumesize
            })

        # defining CLI command
        thinness = self._get_provisioning_by_volume(volume)

        # executing CLI command to create volume
        LOG.debug(
            _('Create Volume: %(volumename)s') % {'volumename': volumename})

        lun_create = ('lun', '-create', '-type', thinness, '-capacity',
                      volumesize, '-sq', 'gb', '-poolName', self.pool_name,
                      '-name', volumename)
        out, rc = self._cli_execute(*lun_create)
        LOG.debug(
            _('Create Volume: %(volumename)s  Return code: %(rc)s') % {
                'volumename': volumename,
                'rc': rc
            })
        if rc == 4:
            LOG.warn(_('Volume %s already exists'), volumename)
        elif rc != 0:
            msg = (_('Failed to create %(volumename)s: %(out)s') % {
                'volumename': volumename,
                'out': out
            })
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        # wait for up to a minute to verify that the LUN has progressed
        # to Ready state
        def _wait_for_lun_ready(volumename, start_time):
            # executing cli command to check volume
            command_to_verify = ('lun', '-list', '-name', volumename)
            out, rc = self._cli_execute(*command_to_verify)
            if rc == 0 and out.find("Ready") > -1:
                raise loopingcall.LoopingCallDone()
            if int(time.time()) - start_time > self.timeout * 60:
                msg = _('LUN %s failed to become Ready') % volumename
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_lun_ready,
                                                     volumename,
                                                     int(time.time()))
        timer.start(interval=self.wait_interval).wait()
Example #13
    def set_chap_authention(self, port, gid):
        ctl_no = port[0]
        port_no = port[1]
        unit = self.unit_name
        auth_username = self.conf.hitachi_auth_user
        auth_password = self.conf.hitachi_auth_password
        add_chap_user = self.conf.hitachi_add_chap_user
        assign_flag = True
        added_flag = False
        opt = '-unit %s -refer %s %s -user %s' % (unit, ctl_no, port_no,
                                                  auth_username)
        ret, stdout, stderr = self.exec_hsnm('auchapuser', opt, noretry=True)

        if ret:
            if not add_chap_user:
                msg = basic_lib.output_err(643, user=auth_username)
                raise exception.HBSDError(message=msg)

            root_helper = utils.get_root_helper()
            cmd = ('%s env %s auchapuser -unit %s -add %s %s '
                   '-tno %d -user %s' % (root_helper, SNM2_ENV, unit, ctl_no,
                                         port_no, gid, auth_username))

            LOG.debug('Add CHAP user')
            loop = loopingcall.FixedIntervalLoopingCall(
                self._wait_for_add_chap_user, cmd, auth_username,
                auth_password, time.time())

            added_flag = loop.start(interval=EXEC_INTERVAL).wait()

        else:
            lines = stdout.splitlines()[4:]
            for line in lines:
                if int(shlex.split(line)[0][0:3]) == gid:
                    assign_flag = False
                    break

        if assign_flag:
            opt = '-unit %s -assign %s %s -tno %d -user %s' % (
                unit, ctl_no, port_no, gid, auth_username)
            ret, stdout, stderr = self.exec_hsnm('auchapuser', opt)
            if ret:
                if added_flag:
                    _ret, _stdout, _stderr = self.delete_chap_user(port)
                    if _ret:
                        msg = basic_lib.set_msg(303, user=auth_username)
                        LOG.warning(msg)

                msg = basic_lib.output_err(600,
                                           cmd='auchapuser',
                                           ret=ret,
                                           out=stdout,
                                           err=stderr)
                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)

        return added_flag
Example #14
    def wait_for_task(self, task):
        """Return a deferred that will give the result of the given task.

        The task is polled until it completes. The method returns the task
        information upon successful completion.

        :param task: Managed object reference of the task
        :return: Task info upon successful completion of the task
        """
        loop = loopingcall.FixedIntervalLoopingCall(self._poll_task, task)
        return loop.start(self._task_poll_interval).wait()
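The task info promised by the docstring travels through
LoopingCallDone(retvalue=...): whatever the callback passes there becomes
the result of wait(). A hedged sketch of a compatible _poll_task, where
the session helper and state names are assumptions:

    def _poll_task(self, task):
        task_info = self._session.get_task_info(task)  # assumed helper
        if task_info.state == 'success':
            # This value is returned by wait() in wait_for_task().
            raise loopingcall.LoopingCallDone(retvalue=task_info)
        if task_info.state == 'error':
            raise exception.VolumeBackendAPIException(data=task_info.error)
        # Still queued or running: poll again on the next interval.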
Example #15
    def connect_volume(self, connection_properties):
        """Discover and attach the volume.

        connection_properties for AoE must include:
        target_shelf - shelf id of volume
        target_lun - lun id of volume
        """
        aoe_device, aoe_path = self._get_aoe_info(connection_properties)

        device_info = {
            'type': 'block',
            'device': aoe_device,
            'path': aoe_path,
        }

        if os.path.exists(aoe_path):
            self._aoe_revalidate(aoe_device)
        else:
            self._aoe_discover()

        waiting_status = {'tries': 0}

        #NOTE(jbr_): Device path is not always present immediately
        def _wait_for_discovery(aoe_path):
            if os.path.exists(aoe_path):
                raise loopingcall.LoopingCallDone

            if waiting_status['tries'] >= self.device_scan_attempts:
                raise exception.VolumeDeviceNotFound(device=aoe_path)

            LOG.warn(
                _LW("AoE volume not yet found at: %(path)s. "
                    "Try number: %(tries)s"), {
                        'path': aoe_path,
                        'tries': waiting_status['tries']
                    })

            self._aoe_discover()
            waiting_status['tries'] += 1

        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_discovery,
                                                     aoe_path)
        timer.start(interval=2).wait()

        if waiting_status['tries']:
            LOG.debug(
                "Found AoE device %(path)s "
                "(after %(tries)s rediscover)", {
                    'path': aoe_path,
                    'tries': waiting_status['tries']
                })

        return device_info
Example #16
    def _wait_for_export_config(self,
                                volume_name,
                                snapshot_name=None,
                                state=False):
        """Polls backend to verify volume's export configuration.

        XG sets/queries following a request to create or delete a lun
        export may fail on the backend if vshared is still processing
        the export action (or times out).  We can check whether it is
        done by polling the export binding for a lun to ensure it is
        created or deleted.

        This function will try to verify the creation or removal of
        export state on both gateway nodes of the array every 5
        seconds.

        Arguments:
            volume_name   -- name of volume
            snapshot_name -- name of volume's snapshot
            state         -- True to poll for existence, False for lack of

        Returns:
            True if the export state was correctly added or removed
            (depending on 'state' param)
        """
        if not snapshot_name:
            bn = "/vshare/config/export/container/%s/lun/%s" \
                % (self.container, volume_name)
        else:
            bn = "/vshare/config/export/snapshot/container/%s/lun/%s/snap/%s" \
                % (self.container, volume_name, snapshot_name)

        def _loop_func(state):
            status = [False, False]
            mg_conns = [self.mga, self.mgb]

            LOG.debug("Entering _wait_for_export_config loop: state=%s.",
                      state)

            for node_id in xrange(2):
                resp = mg_conns[node_id].basic.get_node_values(bn)
                if state and len(resp.keys()):
                    status[node_id] = True
                elif (not state) and (not len(resp.keys())):
                    status[node_id] = True

            if status[0] and status[1]:
                raise loopingcall.LoopingCallDone(retvalue=True)

        timer = loopingcall.FixedIntervalLoopingCall(_loop_func, state)
        success = timer.start(interval=5).wait()

        return success
Example #17
 def ensure_vdisk_no_fc_mappings(self, name, allow_snaps=True):
     """Ensure vdisk has no flashcopy mappings."""
     timer = loopingcall.FixedIntervalLoopingCall(
         self._check_vdisk_fc_mappings, name, allow_snaps)
     # Create a timer greenthread. The default volume service heart
     # beat is every 10 seconds. The flashcopy usually takes hours
     # before it finishes. Don't set the sleep interval shorter
     # than the heartbeat. Otherwise volume service heartbeat
     # will not be serviced.
     LOG.debug('Calling _ensure_vdisk_no_fc_mappings: vdisk %s' % name)
     ret = timer.start(interval=self.check_fcmapping_interval).wait()
     timer.stop()
     return ret
Example #18
    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        LOG.debug(_('Entering delete_snapshot.'))

        snapshotname = snapshot['name']
        volumename = snapshot['volume_name']
        LOG.info(
            _('Delete Snapshot: %(snapshot)s: volume: %(volume)s') % {
                'snapshot': snapshotname,
                'volume': volumename
            })

        def _wait_for_snap_delete(snapshot, start_time):
            # defining CLI command
            snapshotname = snapshot['name']
            volumename = snapshot['volume_name']
            snap_destroy = ('snap', '-destroy', '-id', snapshotname, '-o')
            # executing CLI command
            out, rc = self._cli_execute(*snap_destroy)

            LOG.debug(
                _('Delete Snapshot: Volume: %(volumename)s  Snapshot: '
                  '%(snapshotname)s  Output: %(out)s') % {
                      'volumename': volumename,
                      'snapshotname': snapshotname,
                      'out': out
                  })

            if rc not in [0, 9, 5]:
                if rc == 13:
                    if int(time.time()) - start_time < \
                            self.timeout * 60:
                        LOG.info(_('Snapshot %s is in use'), snapshotname)
                    else:
                        msg = (_('Failed to destroy %s '
                                 'because snapshot is in use.') % snapshotname)
                        LOG.error(msg)
                        raise exception.SnapshotIsBusy(data=msg)
                else:
                    msg = _('Failed to destroy %s') % snapshotname
                    LOG.error(msg)
                    raise exception.VolumeBackendAPIException(data=msg)
            else:
                raise loopingcall.LoopingCallDone()

        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_snap_delete,
                                                     snapshot,
                                                     int(time.time()))
        timer.start(interval=self.wait_interval).wait()
Example #19
    def _wait_run_delete_lun_snapshot(self, snapshot):
        """Run and wait for LUN snapshot to complete.

        Arguments:
            snapshot -- cinder snapshot object provided by the Manager
        """
        cinder_volume_id = snapshot['volume_id']
        cinder_snapshot_id = snapshot['id']

        comment = self._compress_snapshot_id(cinder_snapshot_id)
        oid = self.vmem_mg.snapshot.snapshot_comment_to_object_id(
            cinder_volume_id, comment)

        def _loop_func():
            LOG.debug("Entering _wait_run_delete_lun_snapshot loop: "
                      "vol=%(vol)s, snap_id=%(snap_id)s, oid=%(oid)s" % {
                          'vol': cinder_volume_id,
                          'oid': oid,
                          'snap_id': cinder_snapshot_id
                      })

            ans = self.vmem_mg.snapshot.delete_lun_snapshot(
                snapshot_object_id=oid)

            if ans['success']:
                LOG.debug("Delete snapshot %(snap_id)s for %(vol)s: "
                          "success" % {
                              'vol': cinder_volume_id,
                              'snap_id': cinder_snapshot_id
                          })
                raise loopingcall.LoopingCallDone(retvalue=True)
            else:
                LOG.warn(
                    _("Delete snapshot %(snap)s of %(vol)s "
                      "encountered temporary error: %(msg)s") % {
                          'snap': cinder_snapshot_id,
                          'vol': cinder_volume_id,
                          'msg': ans['msg']
                      })

        timer = loopingcall.FixedIntervalLoopingCall(_loop_func)
        success = timer.start(interval=1).wait()

        if not success:
            raise ViolinBackendErr(
                _("Failed to delete snapshot %(snap)s of volume %(vol)s") % {
                    'snap': cinder_snapshot_id,
                    'vol': cinder_volume_id
                })
Example #20
    def _wait_for_volume_delete(self, volume_name):
        """Wait for volume delete to complete."""
        timer = loopingcall.FixedIntervalLoopingCall(
            self._check_volume_delete_finished, volume_name)
        LOG.debug('Calling _wait_for_volume_delete: volume_name %s.' %
                  volume_name)
        ret = timer.start(
            interval=CHECK_VOLUME_DELETE_FINISHED_INTERVAL).wait()
        timer.stop()
        if not ret:
            msg = (_LE('Delete volume failed, volume_name: %s.') % volume_name)
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(message=msg)

        LOG.debug('Finish _wait_for_volume_delete: volume_name %s.' %
                  volume_name)
Example #21
    def _wait_for_snapshot_delete(self, snapshot_name):
        """Wait for snapshot delete to complete."""
        timer = loopingcall.FixedIntervalLoopingCall(
            self._check_snapshot_delete_finished, snapshot_name)
        LOG.debug('Calling _wait_for_snapshot_delete: snapshot_name %s.' %
                  snapshot_name)
        ret = timer.start(
            interval=CHECK_SNAPSHOT_DELETE_FINISHED_INTERVAL).wait()
        timer.stop()
        if not ret:
            msg = (_LE('Delete snapshot failed, snapshot_name: %s.') %
                   snapshot_name)
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(message=msg)

        LOG.debug('Finish _wait_for_snapshot_delete: snapshot_name %s.' %
                  snapshot_name)
Example #22
    def delete_volume(self, pvc_volume_id):
        """
        Deletes the specified powervc volume id from powervc
        """
        LOG.debug(_("Deleting pvc volume: %s"), pvc_volume_id)
        if not pvc_volume_id:
            raise AttributeError(
                _("Powervc volume identifier must be "
                  "specified"))
        existed_pvc_volume = None
        try:
            existed_pvc_volume = PowerVCService._client.volumes.get(
                pvc_volume_id)
        except exceptions.NotFound:
            LOG.critical(_("pvc: %s no longer existed in powervc, ignore"),
                         pvc_volume_id)
            raise

        temp_status = getattr(existed_pvc_volume, 'status', None)
        if temp_status == constants.STATUS_DELETING:
            # Volume in deleting status, do not perform delete operation
            # again
            LOG.warning(_("pvc: %s is deleting in powervc, wait for status"),
                        pvc_volume_id)
        else:
            # volume available for deletion, perform the delete operation
            PowerVCService._client.volumes.delete(pvc_volume_id)

        LOG.debug(_('wait until volume is deleted or status is ERROR'))
        timer = loopingcall.FixedIntervalLoopingCall(
            self._wait_for_state_change, existed_pvc_volume.id,
            getattr(existed_pvc_volume, 'status', None), '',
            constants.STATUS_DELETING)

        try:
            timer.start(interval=10).wait()
        except exception.VolumeNotFound:
            # deleted complete
            LOG.info(_("pvc: %s deleted successfully"), pvc_volume_id)
        except exception.InvalidVolume:
            LOG.critical(_("pvc: %s deleted failed, "), pvc_volume_id)
            # when delete failed raise exception
            raise exception.CinderException(
                _('Volume deletion failed for id: %s') % pvc_volume_id)
Example #23
def wait_for_condition(func, interval, timeout):
    start_time = time.time()

    def _inner():
        try:
            res = func()
        except Exception as ex:
            raise exception.VolumeBackendAPIException(data=ex)

        if res:
            raise loopingcall.LoopingCallDone()

        if int(time.time()) - start_time > timeout:
            msg = (_('wait_for_condition: %s timed out.') % func.__name__)
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

    timer = loopingcall.FixedIntervalLoopingCall(_inner)
    timer.start(interval=interval).wait()
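Example #23 is the most reusable form of the pattern; usage is a single
call with a predicate (the backend helper below is hypothetical):

# Block until the backend reports the volume as mapped, polling every
# 5 seconds and raising VolumeBackendAPIException after 300 seconds.
wait_for_condition(lambda: backend.volume_is_mapped(volume_id),
                   interval=5, timeout=300)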
Example #24
    def _add_vdisk_copy_op(self, ctxt, volume, new_op):
        metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
                                                     volume['id'])
        curr_ops = metadata.get('vdiskcopyops', None)
        if curr_ops:
            curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
            curr_ops_list.append(new_op)
            new_ops_list = curr_ops_list
        else:
            new_ops_list = [new_op]
        new_ops_str = ';'.join([':'.join(x) for x in new_ops_list])
        self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'],
                                             {'vdiskcopyops': new_ops_str},
                                             False)
        if volume['id'] in self._vdiskcopyops:
            self._vdiskcopyops[volume['id']].append(new_op)
        else:
            self._vdiskcopyops[volume['id']] = [new_op]

        # We added the first copy operation, so start the looping call
        if len(self._vdiskcopyops) == 1:
            self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall(
                self._check_volume_copy_ops)
            self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)
Example #25
    def backup(self, backup, volume_file, backup_metadata=True):
        """Backup the given volume.

           If backup['parent_id'] is given, then an incremental backup
           is performed.
        """
        if self.chunk_size_bytes % self.sha_block_size_bytes:
            err = _('Chunk size is not multiple of '
                    'block size for creating hash.')
            raise exception.InvalidBackup(reason=err)

        # Read the shafile of the parent backup if backup['parent_id']
        # is given.
        parent_backup_shafile = None
        parent_backup = None
        if backup['parent_id']:
            parent_backup = self.db.backup_get(self.context,
                                               backup['parent_id'])
            parent_backup_shafile = self._read_sha256file(parent_backup)
            parent_backup_shalist = parent_backup_shafile['sha256s']
            if (parent_backup_shafile['chunk_size'] !=
                    self.sha_block_size_bytes):
                err = (_('Hash block size has changed since the last '
                         'backup. New hash block size: %(new)s. Old hash '
                         'block size: %(old)s. Do a full backup.')
                       % {'old': parent_backup_shafile['chunk_size'],
                          'new': self.sha_block_size_bytes})
                raise exception.InvalidBackup(reason=err)
            # If the volume size increased since the last backup, fail
            # the incremental backup and ask user to do a full backup.
            if backup['size'] > parent_backup['size']:
                err = _('Volume size increased since the last '
                        'backup. Do a full backup.')
                raise exception.InvalidBackup(reason=err)

        (object_meta, object_sha256, extra_metadata, container,
         volume_size_bytes) = self._prepare_backup(backup)

        counter = 0
        total_block_sent_num = 0

        # There are two mechanisms to send the progress notification.
        # 1. The notifications are periodically sent in a certain interval.
        # 2. The notifications are sent after a certain number of chunks.
        # Both of them are working simultaneously during the volume backup,
        # when swift is taken as the backup backend.
        def _notify_progress():
            self._send_progress_notification(self.context, backup,
                                             object_meta,
                                             total_block_sent_num,
                                             volume_size_bytes)
        timer = loopingcall.FixedIntervalLoopingCall(
            _notify_progress)
        if self.enable_progress_timer:
            timer.start(interval=self.backup_timer_interval)

        sha256_list = object_sha256['sha256s']
        shaindex = 0
        while True:
            data_offset = volume_file.tell()
            data = volume_file.read(self.chunk_size_bytes)
            if data == '':
                break

            # Calculate new shas with the datablock.
            shalist = []
            off = 0
            datalen = len(data)
            while off < datalen:
                chunk_start = off
                chunk_end = chunk_start + self.sha_block_size_bytes
                if chunk_end > datalen:
                    chunk_end = datalen
                chunk = data[chunk_start:chunk_end]
                sha = hashlib.sha256(chunk).hexdigest()
                shalist.append(sha)
                off += self.sha_block_size_bytes
            sha256_list.extend(shalist)

            # If parent_backup is not None, that means an incremental
            # backup will be performed.
            if parent_backup:
                # Find the extent that needs to be backed up.
                extent_off = -1
                for idx, sha in enumerate(shalist):
                    if sha != parent_backup_shalist[shaindex]:
                        if extent_off == -1:
                            # Start of new extent.
                            extent_off = idx * self.sha_block_size_bytes
                    else:
                        if extent_off != -1:
                            # We've reached the end of extent.
                            extent_end = idx * self.sha_block_size_bytes
                            segment = data[extent_off:extent_end]
                            self._backup_chunk(backup, container, segment,
                                               data_offset + extent_off,
                                               object_meta,
                                               extra_metadata)
                            extent_off = -1
                    shaindex += 1

                # The last extent extends to the end of data buffer.
                if extent_off != -1:
                    extent_end = datalen
                    segment = data[extent_off:extent_end]
                    self._backup_chunk(backup, container, segment,
                                       data_offset + extent_off,
                                       object_meta, extra_metadata)
                    extent_off = -1
            else:  # Do a full backup.
                self._backup_chunk(backup, container, data, data_offset,
                                   object_meta, extra_metadata)

            # Notifications
            total_block_sent_num += self.data_block_num
            counter += 1
            if counter == self.data_block_num:
                # Send the notification to Ceilometer when the chunk
                # number reaches the data_block_num.  The backup percentage
                # is put in the metadata as the extra information.
                self._send_progress_notification(self.context, backup,
                                                 object_meta,
                                                 total_block_sent_num,
                                                 volume_size_bytes)
                # Reset the counter
                counter = 0

        # Stop the timer.
        timer.stop()
        # All the data have been sent, the backup_percent reaches 100.
        self._send_progress_end(self.context, backup, object_meta)

        object_sha256['sha256s'] = sha256_list
        if backup_metadata:
            try:
                self._backup_metadata(backup, object_meta)
            # Whatever goes wrong, we want to log, cleanup, and re-raise.
            except Exception as err:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Backup volume metadata failed: %s."),
                                  err)
                    self.delete(backup)

        self._finalize_backup(backup, container, object_meta, object_sha256)
Example #26
    def do_setup(self, ctxt):
        """Check that we have all configuration details from the storage."""
        LOG.debug('enter: do_setup')

        # Get storage system name, id, and code level
        self._state.update(self._helpers.get_system_info())

        # Validate that the pool exists
        pool = self.configuration.storwize_svc_volpool_name
        try:
            self._helpers.get_pool_attrs(pool)
        except exception.VolumeBackendAPIException:
            msg = _('Failed getting details for pool %s') % pool
            raise exception.InvalidInput(reason=msg)

        # Check if compression is supported
        self._state['compression_enabled'] = \
            self._helpers.compression_enabled()

        # Get the available I/O groups
        self._state['available_iogrps'] = \
            self._helpers.get_available_io_groups()

        # Get the iSCSI and FC names of the Storwize/SVC nodes
        self._state['storage_nodes'] = self._helpers.get_node_info()

        # Add the iSCSI IP addresses and WWPNs to the storage node info
        self._helpers.add_iscsi_ip_addrs(self._state['storage_nodes'])
        self._helpers.add_fc_wwpns(self._state['storage_nodes'])

        # For each node, check what connection modes it supports.  Delete any
        # nodes that do not support any types (may be partially configured).
        to_delete = []
        for k, node in self._state['storage_nodes'].iteritems():
            if ((len(node['ipv4']) or len(node['ipv6']))
                    and len(node['iscsi_name'])):
                node['enabled_protocols'].append('iSCSI')
                self._state['enabled_protocols'].add('iSCSI')
            if len(node['WWPN']):
                node['enabled_protocols'].append('FC')
                self._state['enabled_protocols'].add('FC')
            if not len(node['enabled_protocols']):
                to_delete.append(k)
        for delkey in to_delete:
            del self._state['storage_nodes'][delkey]

        # Make sure we have at least one node configured
        if not len(self._state['storage_nodes']):
            msg = _('do_setup: No configured nodes.')
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)

        # Build the list of in-progress vdisk copy operations
        if ctxt is None:
            admin_context = context.get_admin_context()
        else:
            admin_context = ctxt.elevated()
        volumes = self.db.volume_get_all_by_host(admin_context, self.host)

        for volume in volumes:
            metadata = self.db.volume_admin_metadata_get(
                admin_context, volume['id'])
            curr_ops = metadata.get('vdiskcopyops', None)
            if curr_ops:
                ops = [tuple(x.split(':')) for x in curr_ops.split(';')]
                self._vdiskcopyops[volume['id']] = ops

        # if vdiskcopy exists in database, start the looping call
        if len(self._vdiskcopyops) >= 1:
            self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall(
                self._check_volume_copy_ops)
            self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)

        LOG.debug('leave: do_setup')
Example #27
 def wait_for_lease_ready(self, lease):
     loop = loopingcall.FixedIntervalLoopingCall(self._poll_lease, lease)
     return loop.start(self._task_poll_interval).wait()
Example #28
    def create_volume(self,
                      local_volume_id,
                      size,
                      snapshot_id=None,
                      source_volid=None,
                      display_name=None,
                      display_description=None,
                      volume_type=None,
                      user_id=None,
                      project_id=None,
                      availability_zone=None,
                      metadata=None,
                      imageRef=None,
                      multiattach=False):
        """
        Creates a volume on powervc
        """

        # Use the standard cinderclient to create volume
        # TODO: Do not pass metadata to PowerVC for now, as we don't
        # know whether this conflicts with the PowerVC design.
        pvc_volume = PowerVCService._client.volumes.create(
            size, snapshot_id, source_volid, display_name, display_description,
            volume_type, user_id, project_id, availability_zone, {}, imageRef,
            multiattach)

        # update powervc uuid to db immediately to avoid duplicated
        # synchronization
        additional_volume_data = {}
        additional_volume_data['metadata'] = metadata
        additional_volume_data['metadata'][constants.LOCAL_PVC_PREFIX + 'id'] \
            = pvc_volume.id
        db.volume_update(context.get_admin_context(), local_volume_id,
                         additional_volume_data)
        LOG.info(_("Volume %s start to create with PVC UUID: %s"),
                 local_volume_id, pvc_volume.id)

        temp_status = getattr(pvc_volume, 'status', None)
        if temp_status == constants.STATUS_CREATING:
            LOG.debug(
                _('wait until the created volume is available or in ERROR'))
            timer = loopingcall.FixedIntervalLoopingCall(
                self._wait_for_state_change, pvc_volume.id,
                getattr(pvc_volume, 'status', None),
                constants.STATUS_AVAILABLE, constants.STATUS_CREATING)

            try:
                timer.start(interval=10).wait()
                # set status to available
                additional_volume_data['status'] = \
                    constants.STATUS_AVAILABLE
            except Exception:
                latest_pvc_volume = PowerVCService._client.volumes.get(
                    pvc_volume.id)
                additional_volume_data['status'] = getattr(
                    latest_pvc_volume, 'status', '')
        else:
            LOG.debug(_('Not in creating status; use the PowerVC status as-is'))
            additional_volume_data['status'] = temp_status

        # return updated volume status information
        return additional_volume_data
Example #29
    def _convert_to_base_volume(self, volume, new_cpg=None):
        try:
            type_info = self.get_volume_settings_from_type(volume)
            if new_cpg:
                cpg = new_cpg
            else:
                cpg = type_info['cpg']

            # Change the name such that it is unique since 3PAR
            # names must be unique across all CPGs
            volume_name = self._get_3par_vol_name(volume['id'])
            temp_vol_name = volume_name.replace("osv-", "omv-")

            # Create a physical copy of the volume
            task_id = self._copy_volume(volume_name, temp_vol_name,
                                        cpg, cpg, type_info['tpvv'])

            LOG.debug(_('Copy volume scheduled: convert_to_base_volume: '
                        'id=%s.') % volume['id'])

            # Wait for the physical copy task to complete
            def _wait_for_task(task_id):
                status = self.client.getTask(task_id)
                LOG.debug("3PAR Task id %(id)s status = %(status)s" %
                          {'id': task_id,
                           'status': status['status']})
                if status['status'] != self.client.TASK_ACTIVE:
                    self._task_status = status
                    raise loopingcall.LoopingCallDone()

            self._task_status = None
            timer = loopingcall.FixedIntervalLoopingCall(
                _wait_for_task, task_id)
            timer.start(interval=1).wait()

            if self._task_status['status'] != self.client.TASK_DONE:
                dbg = {'status': self._task_status, 'id': volume['id']}
                msg = _('Copy volume task failed: convert_to_base_volume: '
                        'id=%(id)s, status=%(status)s.') % dbg
                raise exception.CinderException(msg)
            else:
                LOG.debug(_('Copy volume completed: convert_to_base_volume: '
                            'id=%s.') % volume['id'])

            comment = self._get_3par_vol_comment(volume_name)
            if comment:
                self.client.modifyVolume(temp_vol_name, {'comment': comment})
            LOG.debug(_('Volume rename completed: convert_to_base_volume: '
                        'id=%s.') % volume['id'])

            # Delete source volume after the copy is complete
            self.client.deleteVolume(volume_name)
            LOG.debug(_('Delete src volume completed: convert_to_base_volume: '
                        'id=%s.') % volume['id'])

            # Rename the new volume to the original name
            self.client.modifyVolume(temp_vol_name, {'newName': volume_name})

            LOG.info(_('Completed: convert_to_base_volume: '
                       'id=%s.') % volume['id'])
        except hpexceptions.HTTPConflict:
            msg = _("Volume (%s) already exists on array.") % volume_name
            LOG.error(msg)
            raise exception.Duplicate(msg)
        except hpexceptions.HTTPBadRequest as ex:
            LOG.error(str(ex))
            raise exception.Invalid(ex.get_description())
        except exception.InvalidInput as ex:
            LOG.error(str(ex))
            raise ex
        except exception.CinderException as ex:
            LOG.error(str(ex))
            raise ex
        except Exception as ex:
            LOG.error(str(ex))
            raise exception.CinderException(ex)
Example #30
 def _start_periodic_tasks(self):
     ssc_periodic_task = loopingcall.FixedIntervalLoopingCall(
         self._update_ssc_info)
     ssc_periodic_task.start(interval=self.SSC_UPDATE_INTERVAL)
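Distilled from the examples above, the core contract is small: wrap a
callback in FixedIntervalLoopingCall, start it with an interval, and end
the loop by raising LoopingCallDone (optionally with retvalue, which
becomes the result of wait()). A self-contained runnable sketch, assuming
the modern oslo.service package (older code imported loopingcall from
each project's openstack.common tree):

    from oslo_service import loopingcall

    state = {'count': 0}

    def _poll():
        state['count'] += 1
        if state['count'] >= 3:
            # retvalue becomes the return value of wait() below.
            raise loopingcall.LoopingCallDone(retvalue='done after 3 polls')

    timer = loopingcall.FixedIntervalLoopingCall(_poll)
    print(timer.start(interval=1).wait())  # ~2s later: 'done after 3 polls'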