Code Example #1
File: ssc_cmode.py  Project: toddnni/cinder
def get_cluster_latest_ssc(*args, **kwargs):
    """Updates volumes including ssc."""
    backend = args[0]
    na_server = args[1]
    vserver = args[2]
    identity = six.text_type(id(backend))
    lock_pr = '%s_%s' % ('refresh_ssc', identity)

    # As this depends on stale job running state
    # set flag as soon as job starts to avoid
    # job accumulation.
    try:
        job_set = na_utils.set_safe_attr(backend, 'ssc_job_running', True)
        if not job_set:
            return

        @utils.synchronized(lock_pr)
        def get_latest_ssc():
            LOG.info(_LI('Running cluster latest ssc job for %(server)s'
                         ' and vserver %(vs)s'),
                     {'server': na_server, 'vs': vserver})
            ssc_vols = get_cluster_ssc(na_server, vserver)
            backend.refresh_ssc_vols(ssc_vols)
            backend.ssc_run_time = timeutils.utcnow()
            LOG.info(_LI('Successfully completed ssc job for %(server)s'
                         ' and vserver %(vs)s'),
                     {'server': na_server, 'vs': vserver})

        get_latest_ssc()
    finally:
        na_utils.set_safe_attr(backend, 'ssc_job_running', False)
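The job runners in these examples all share one guard pattern: a thread-safe flag set via set_safe_attr rejects duplicate submissions immediately, while a named lock (utils.synchronized) serializes whichever worker gets through. A stripped-down sketch of that pattern, using raw threading primitives in place of the cinder helpers (run_refresh_job and the lock names are illustrative, not from the source):

import threading

_flag_lock = threading.Lock()   # protects the job_running flag
_job_lock = threading.Lock()    # serializes the actual refresh work

def run_refresh_job(backend, do_refresh):
    """Illustrative stand-in for the flag-plus-lock pattern above."""
    # Flip the flag before taking the work lock so duplicate submissions
    # return immediately instead of accumulating behind _job_lock (the
    # "job accumulation" the comment above warns about).
    with _flag_lock:
        if getattr(backend, 'job_running', False):
            return
        backend.job_running = True
    try:
        with _job_lock:
            do_refresh()
    finally:
        with _flag_lock:
            backend.job_running = False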
Code Example #2
File: ssc_cmode.py  Project: NeCTAR-RC/cinder
def refresh_cluster_stale_ssc(*args, **kwargs):
    """Refreshes stale ssc volumes with latest."""
    backend = args[0]
    na_server = args[1]
    vserver = args[2]
    identity = six.text_type(id(backend))
    lock_pr = "%s_%s" % ("refresh_ssc", identity)
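    # Note: the lock name matches the "refresh_ssc_<id>" string built in
    # get_cluster_latest_ssc (example #1), so the stale-refresh and
    # full-refresh jobs for a given backend cannot run concurrently.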
    try:
        job_set = na_utils.set_safe_attr(backend, "refresh_stale_running", True)
        if not job_set:
            return

        @utils.synchronized(lock_pr)
        def refresh_stale_ssc():
            stale_vols = backend._update_stale_vols(reset=True)
            LOG.info(
                _LI("Running stale ssc refresh job for %(server)s" " and vserver %(vs)s")
                % {"server": na_server, "vs": vserver}
            )
            # refreshing single volumes can create inconsistency
            # hence doing manipulations on copy
            ssc_vols_copy = copy.deepcopy(backend.ssc_vols)
            refresh_vols = set()
            expired_vols = set()
            for vol in stale_vols:
                name = vol.id["name"]
                res = get_cluster_vols_with_ssc(na_server, vserver, name)
                if res:
                    refresh_vols.add(res.pop())
                else:
                    expired_vols.add(vol)
            for vol in refresh_vols:
                for k in ssc_vols_copy:
                    vol_set = ssc_vols_copy[k]
                    vol_set.discard(vol)
                    if k == "mirrored" and vol.mirror.get("mirrored"):
                        vol_set.add(vol)
                    if k == "dedup" and vol.sis.get("dedup"):
                        vol_set.add(vol)
                    if k == "compression" and vol.sis.get("compression"):
                        vol_set.add(vol)
                    if k == "thin" and vol.space.get("thin_provisioned"):
                        vol_set.add(vol)
                    if k == "all":
                        vol_set.add(vol)
            for vol in expired_vols:
                for k in ssc_vols_copy:
                    vol_set = ssc_vols_copy[k]
                    vol_set.discard(vol)
            backend.refresh_ssc_vols(ssc_vols_copy)
            LOG.info(
                _LI("Successfully completed stale refresh job for" " %(server)s and vserver %(vs)s")
                % {"server": na_server, "vs": vserver}
            )

        refresh_stale_ssc()
    finally:
        na_utils.set_safe_attr(backend, "refresh_stale_running", False)
Code Example #3
File: ssc_utils.py  Project: AsherBond/cinder
def refresh_cluster_stale_ssc(*args, **kwargs):
    """Refreshes stale ssc volumes with latest."""
    backend = args[0]
    na_server = args[1]
    vserver = args[2]
    identity = str(id(backend))
    lock_pr = '%s_%s' % ('refresh_ssc', identity)
    try:
        job_set = na_utils.set_safe_attr(
            backend, 'refresh_stale_running', True)
        if not job_set:
            return

        @utils.synchronized(lock_pr)
        def refresh_stale_ssc():
            stale_vols = backend._update_stale_vols(reset=True)
            LOG.info(_('Running stale ssc refresh job for %(server)s'
                       ' and vserver %(vs)s')
                     % {'server': na_server, 'vs': vserver})
            # refreshing single volumes can create inconsistency
            # hence doing manipulations on copy
            ssc_vols_copy = copy.deepcopy(backend.ssc_vols)
            refresh_vols = set()
            expired_vols = set()
            for vol in stale_vols:
                name = vol.id['name']
                res = get_cluster_vols_with_ssc(na_server, vserver, name)
                if res:
                    refresh_vols.add(res.pop())
                else:
                    expired_vols.add(vol)
            for vol in refresh_vols:
                for k in ssc_vols_copy:
                    vol_set = ssc_vols_copy[k]
                    vol_set.discard(vol)
                    if k == "mirrored" and vol.mirror.get('mirrored'):
                        vol_set.add(vol)
                    if k == "dedup" and vol.sis.get('dedup'):
                        vol_set.add(vol)
                    if k == "compression" and vol.sis.get('compression'):
                        vol_set.add(vol)
                    if k == "thin" and vol.space.get('thin_provisioned'):
                        vol_set.add(vol)
                    if k == "all":
                        vol_set.add(vol)
            for vol in expired_vols:
                for k in ssc_vols_copy:
                    vol_set = ssc_vols_copy[k]
                    vol_set.discard(vol)
            backend.refresh_ssc_vols(ssc_vols_copy)
            LOG.info(_('Successfully completed stale refresh job for'
                       ' %(server)s and vserver %(vs)s')
                     % {'server': na_server, 'vs': vserver})

        refresh_stale_ssc()
    finally:
        na_utils.set_safe_attr(backend, 'refresh_stale_running', False)
Code Example #4
File: iscsi.py  Project: nash-x/hws
    def _garbage_collect_tmp_vols(self):
        """Removes tmp vols with no snapshots."""
        try:
            if not utils.set_safe_attr(self, "clean_job_running", True):
                LOG.warn(_("Returning as clean tmp vol job already running."))
                return
            for label in self._objects["volumes"]["label_ref"].keys():
                if (label.startswith("tmp-")
                        and not self._is_volume_containing_snaps(label)):
                    try:
                        self._delete_volume(label)
                    except exception.NetAppDriverException:
                        LOG.debug("Error deleting vol with label %s.", label)
        finally:
            utils.set_safe_attr(self, "clean_job_running", False)
Code Example #5
File: iscsi.py  Project: carrierstack/cinder
    def _garbage_collect_tmp_vols(self):
        """Removes tmp vols with no snapshots."""
        try:
            if not utils.set_safe_attr(self, 'clean_job_running', True):
                LOG.warn(_('Returning as clean tmp vol job already running.'))
                return
            for label in self._objects['volumes']['label_ref'].keys():
                if (label.startswith('tmp-')
                        and not self._is_volume_containing_snaps(label)):
                    try:
                        self._delete_volume(label)
                    except exception.NetAppDriverException:
                        LOG.debug("Error deleting vol with label %s.", label)
        finally:
            utils.set_safe_attr(self, 'clean_job_running', False)
Code Example #6
File: library.py  Project: shishirng/cinder
    def _garbage_collect_tmp_vols(self):
        """Removes tmp vols with no snapshots."""
        try:
            if not na_utils.set_safe_attr(self, "clean_job_running", True):
                LOG.warning(_LW("Returning as clean tmp " "vol job already running."))
                return

            for vol in self._client.list_volumes():
                label = vol["label"]
                if label.startswith("tmp-") and not self._is_volume_containing_snaps(label):
                    try:
                        self._client.delete_volume(vol["volumeRef"])
                    except exception.NetAppDriverException as e:
                        LOG.debug("Error deleting vol with label %s: %s",
                                  label, e)
        finally:
            na_utils.set_safe_attr(self, "clean_job_running", False)
Code Example #7
File: iscsi.py  Project: NeCTAR-RC/cinder
    def _garbage_collect_tmp_vols(self):
        """Removes tmp vols with no snapshots."""
        try:
            if not na_utils.set_safe_attr(self, 'clean_job_running', True):
                LOG.warning(_LW('Returning as clean tmp '
                                'vol job already running.'))
                return
            for label in self._objects['volumes']['label_ref'].keys():
                if (label.startswith('tmp-') and
                        not self._is_volume_containing_snaps(label)):
                    try:
                        self._delete_volume(label)
                    except exception.NetAppDriverException:
                        LOG.debug("Error deleting vol with label %s.",
                                  label)
        finally:
            na_utils.set_safe_attr(self, 'clean_job_running', False)
Code Example #8
File: test_utils.py  Project: balagopalraj/clearlinux
    def test_set_safe_attr(self):

        fake_object = mock.Mock()
        fake_object.fake_attr = None

        # test initial checks
        self.assertFalse(na_utils.set_safe_attr(None, fake_object, None))
        self.assertFalse(na_utils.set_safe_attr(fake_object, None, None))
        self.assertFalse(na_utils.set_safe_attr(fake_object, "fake_attr", None))

        # test value isn't changed if it shouldn't be and retval is False
        fake_object.fake_attr = "fake_value"
        self.assertFalse(na_utils.set_safe_attr(fake_object, "fake_attr", "fake_value"))
        self.assertEqual(fake_object.fake_attr, "fake_value")

        # test value is changed if it should be and retval is True
        self.assertTrue(na_utils.set_safe_attr(fake_object, "fake_attr", "new_fake_value"))
        self.assertEqual(fake_object.fake_attr, "new_fake_value")
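The assertions above pin down set_safe_attr's contract: it returns False when the object or attribute name is missing, or when the value would not actually change, and returns True only after setting a new value. A minimal implementation consistent with that contract (a hypothetical sketch; the real na_utils helper may differ in detail):

import threading

_attr_lock = threading.Lock()

def set_safe_attr(instance, attr, val):
    """Set instance.attr to val only if that changes it; report whether it did."""
    if not instance or not attr:
        return False
    with _attr_lock:
        # Leave the attribute untouched (and report False) when the value
        # is already what the caller asked for.
        if getattr(instance, attr, None) == val:
            return False
        setattr(instance, attr, val)
        return True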
Code Example #9
    def _garbage_collect_tmp_vols(self):
        """Removes tmp vols with no snapshots."""
        try:
            if not na_utils.set_safe_attr(self, 'clean_job_running', True):
                LOG.warning(_LW('Returning as clean tmp '
                                'vol job already running.'))
                return

            for vol in self._client.list_volumes():
                label = vol['label']
                if (label.startswith('tmp-') and
                        not self._is_volume_containing_snaps(label)):
                    try:
                        self._client.delete_volume(vol['volumeRef'])
                    except exception.NetAppDriverException as e:
                        LOG.debug("Error deleting vol with label %s: %s",
                                  label, e)
        finally:
            na_utils.set_safe_attr(self, 'clean_job_running', False)
Code Example #10
File: block_7mode.py  Project: rahul4-jain/cinder
    def _refresh_volume_info(self):
        """Saves the volume information for the filer."""

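        # Refresh when the filer has never been queried, when a refresh was
        # explicitly requested via vol_refresh_voluntary, or when the
        # configured refresh interval has elapsed.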
        if (self.vol_refresh_time is None or self.vol_refresh_voluntary or
                timeutils.is_newer_than(self.vol_refresh_time,
                                        self.vol_refresh_interval)):
            try:
                job_set = na_utils.set_safe_attr(self, 'vol_refresh_running',
                                                 True)
                if not job_set:
                    LOG.warning(_LW("Volume refresh job already running. "
                                    "Returning..."))
                    return
                self.vol_refresh_voluntary = False
                self.vols = self.zapi_client.get_filer_volumes()
                self.vol_refresh_time = timeutils.utcnow()
            except Exception as e:
                LOG.warning(_LW("Error refreshing volume info. Message: %s"),
                            e)
            finally:
                na_utils.set_safe_attr(self, 'vol_refresh_running', False)
Code Example #11
    def test_set_safe_attr(self):

        fake_object = mock.Mock()
        fake_object.fake_attr = None

        # test initial checks
        self.assertFalse(na_utils.set_safe_attr(None, fake_object, None))
        self.assertFalse(na_utils.set_safe_attr(fake_object, None, None))
        self.assertFalse(na_utils.set_safe_attr(fake_object, 'fake_attr',
                                                None))

        # test value isn't changed if it shouldn't be and retval is False
        fake_object.fake_attr = 'fake_value'
        self.assertFalse(
            na_utils.set_safe_attr(fake_object, 'fake_attr', 'fake_value'))
        self.assertEqual('fake_value', fake_object.fake_attr)

        # test value is changed if it should be and retval is True
        self.assertTrue(
            na_utils.set_safe_attr(fake_object, 'fake_attr', 'new_fake_value'))
        self.assertEqual('new_fake_value', fake_object.fake_attr)
Code Example #12
    def _refresh_volume_info(self):
        """Saves the volume information for the filer."""

        if (self.vol_refresh_time is None
                or self.vol_refresh_voluntary or timeutils.is_newer_than(
                    self.vol_refresh_time, self.vol_refresh_interval)):
            try:
                job_set = na_utils.set_safe_attr(self, 'vol_refresh_running',
                                                 True)
                if not job_set:
                    LOG.warning("Volume refresh job already running. "
                                "Returning...")
                    return
                self.vol_refresh_voluntary = False
                self.vols = self.zapi_client.get_filer_volumes()
                self.volume_list = self._get_filtered_pools()
                self.vol_refresh_time = timeutils.utcnow()
            except Exception as e:
                LOG.warning("Error refreshing volume info. Message: %s", e)
            finally:
                na_utils.set_safe_attr(self, 'vol_refresh_running', False)
Code Example #13
def get_cluster_latest_ssc(*args, **kwargs):
    """Updates volumes including ssc."""
    backend = args[0]
    na_server = args[1]
    vserver = args[2]
    identity = six.text_type(id(backend))
    lock_pr = '%s_%s' % ('refresh_ssc', identity)

    # As this depends on stale job running state
    # set flag as soon as job starts to avoid
    # job accumulation.
    try:
        job_set = na_utils.set_safe_attr(backend, 'ssc_job_running', True)
        if not job_set:
            return

        @utils.synchronized(lock_pr)
        def get_latest_ssc():
            LOG.info(
                _LI('Running cluster latest ssc job for %(server)s'
                    ' and vserver %(vs)s'), {
                        'server': na_server,
                        'vs': vserver
                    })
            ssc_vols = get_cluster_ssc(na_server, vserver)
            backend.refresh_ssc_vols(ssc_vols)
            backend.ssc_run_time = timeutils.utcnow()
            LOG.info(
                _LI('Successfully completed ssc job for %(server)s'
                    ' and vserver %(vs)s'), {
                        'server': na_server,
                        'vs': vserver
                    })

        get_latest_ssc()
    finally:
        na_utils.set_safe_attr(backend, 'ssc_job_running', False)
Code Example #14
def refresh_cluster_stale_ssc(*args, **kwargs):
    """Refreshes stale ssc volumes with latest."""
    backend = args[0]
    na_server = args[1]
    vserver = args[2]
    identity = six.text_type(id(backend))
    lock_pr = '%s_%s' % ('refresh_ssc', identity)
    try:
        job_set = na_utils.set_safe_attr(backend, 'refresh_stale_running',
                                         True)
        if not job_set:
            return

        @utils.synchronized(lock_pr)
        def refresh_stale_ssc():
            stale_vols = backend._update_stale_vols(reset=True)
            LOG.info(
                _LI('Running stale ssc refresh job for %(server)s'
                    ' and vserver %(vs)s'), {
                        'server': na_server,
                        'vs': vserver
                    })
            # refreshing single volumes can create inconsistency
            # hence doing manipulations on copy
            ssc_vols_copy = copy.deepcopy(backend.ssc_vols)
            refresh_vols = set()
            expired_vols = set()
            for vol in stale_vols:
                name = vol.id['name']
                res = get_cluster_vols_with_ssc(na_server, vserver, name)
                if res:
                    refresh_vols.add(res.pop())
                else:
                    expired_vols.add(vol)
            for vol in refresh_vols:
                for k in ssc_vols_copy:
                    vol_set = ssc_vols_copy[k]
                    vol_set.discard(vol)
                    if k == "mirrored" and vol.mirror.get('mirrored'):
                        vol_set.add(vol)
                    if k == "dedup" and vol.sis.get('dedup'):
                        vol_set.add(vol)
                    if k == "compression" and vol.sis.get('compression'):
                        vol_set.add(vol)
                    if k == "thin" and vol.space.get('thin_provisioned'):
                        vol_set.add(vol)
                    if k == "all":
                        vol_set.add(vol)
            for vol in expired_vols:
                for k in ssc_vols_copy:
                    vol_set = ssc_vols_copy[k]
                    vol_set.discard(vol)
            backend.refresh_ssc_vols(ssc_vols_copy)
            LOG.info(
                _LI('Successfully completed stale refresh job for'
                    ' %(server)s and vserver %(vs)s'), {
                        'server': na_server,
                        'vs': vserver
                    })

        refresh_stale_ssc()
    finally:
        na_utils.set_safe_attr(backend, 'refresh_stale_running', False)