Example No. 1
 def stop(self, graceful=False):
     if self.server:
         LOG.info(_LI("Stop the Notification server..."))
         self.server.stop()
         if graceful:
             LOG.info(_LI("Notification server stopped successfully. Waiting for final message to be processed..."))
             self.server.wait()
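
All of the examples on this page rely on oslo.i18n translation markers (_, _LI, _LW, _LE) together with an oslo.log logger. For context, a minimal sketch of how those names are typically wired up in a project's _i18n module follows; the 'ceph_manager' domain name is an illustrative assumption, not something taken from these snippets.

import oslo_i18n
from oslo_log import log as logging

# Hypothetical _i18n.py wiring; the domain name is an illustrative assumption.
_translators = oslo_i18n.TranslatorFactory(domain='ceph_manager')

_ = _translators.primary        # user-facing / exception messages
_LI = _translators.log_info     # messages passed to LOG.info()
_LW = _translators.log_warning  # messages passed to LOG.warn()/LOG.warning()
_LE = _translators.log_error    # messages passed to LOG.error()

LOG = logging.getLogger(__name__)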
Example No. 2
    def _get_vol_byuuid(self, voluuid):
        volkey = self.volumeroot + voluuid
        result = self.client.read(volkey)

        volval = json.loads(result.value)
        LOG.info(_LI('Read key: %s from etcd, result is: %s'), volkey, volval)
        return volval
Example No. 3
    def _get_vol_byuuid(self, voluuid):
        volkey = self.volumeroot + voluuid
        result = self.client.read(volkey)

        volval = json.loads(result.value)
        LOG.info(_LI('Read key: %s from etcd, result is: %s'), volkey, volval)
        return volval
Example No. 4
 def start(self):
     LOG.info(_LI("Start RPC server..."))
     self.server = oslo_messaging.get_rpc_server(self.transport,
                                                 self.target,
                                                 self.endpoints)
     self.server.start()
     self.server.wait()
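
For reference, the transport, target and endpoints consumed by this RPC server could be built with oslo.messaging roughly as sketched below; the topic, server name and endpoint class are illustrative assumptions rather than values taken from these examples.

from oslo_config import cfg
import oslo_messaging


class RpcEndpoint(object):
    # Placeholder endpoint: any public method becomes callable over RPC.
    def get_primary_tier_size(self, ctxt):
        return 0


# Topic and server name below are made up for illustration.
transport = oslo_messaging.get_transport(cfg.CONF)
target = oslo_messaging.Target(topic='ceph-manager', server='controller-0')
server = oslo_messaging.get_rpc_server(transport, target, [RpcEndpoint()],
                                       executor='eventlet')
server.start()
server.wait()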
Example No. 5
 def _clear_fault(self, alarm_id, entity_instance_id=None):
     # Only clear alarm if there is one already raised
     if (alarm_id == fm_constants.FM_ALARM_ID_STORAGE_CEPH
             and self.current_health_alarm):
         LOG.info(_LI("Clearing health alarm"))
         self.service.fm_api.clear_fault(
             fm_constants.FM_ALARM_ID_STORAGE_CEPH,
             self.service.entity_instance_id)
     elif (alarm_id == fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE
           and entity_instance_id in self.current_quota_alarms):
         LOG.info(
             _LI("Clearing quota alarm with entity_instance_id %s") %
             entity_instance_id)
         self.service.fm_api.clear_fault(
             fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE,
             entity_instance_id)
Example No. 6
 def _set_upgrade(self, upgrade):
     state = upgrade.get('state')
     from_version = upgrade.get('from_version')
     if (state and state != constants.UPGRADE_COMPLETED
             and from_version == constants.TITANIUM_SERVER_VERSION_16_10):
         LOG.info(_LI("Surpress require_jewel_osds health warning"))
         self.surpress_require_jewel_osds_warning = True
Example No. 7
def osd_pool_create(ceph_api, pool_name, pg_num, pgp_num):
    if pool_name.endswith("-cache"):
        # ruleset 1: is the ruleset for the cache tier
        # Name: cache_tier_ruleset
        ruleset = 1
    else:
        # ruleset 0: is the default ruleset if no crushmap is loaded or
        # the ruleset for the backing tier if loaded:
        # Name: storage_tier_ruleset
        ruleset = 0
    response, body = ceph_api.osd_pool_create(pool_name,
                                              pg_num,
                                              pgp_num,
                                              pool_type="replicated",
                                              ruleset=ruleset,
                                              body='json')
    if response.ok:
        LOG.info(
            _LI("Created OSD pool: "
                "pool_name={}, pg_num={}, pgp_num={}, "
                "pool_type=replicated, ruleset={}").format(
                    pool_name, pg_num, pgp_num, ruleset))
    else:
        e = exception.CephPoolCreateFailure(name=pool_name,
                                            reason=response.reason)
        LOG.error(e)
        raise e

    # Explicitly assign the ruleset to the pool on creation since it is
    # ignored in the create call
    response, body = ceph_api.osd_set_pool_param(pool_name,
                                                 "crush_ruleset",
                                                 ruleset,
                                                 body='json')
    if response.ok:
        LOG.info(
            _LI("Assigned crush ruleset to OS pool: "
                "pool_name={}, ruleset={}").format(pool_name, ruleset))
    else:
        e = exception.CephPoolRulesetFailure(name=pool_name,
                                             reason=response.reason)
        LOG.error(e)
        ceph_api.osd_pool_delete(pool_name,
                                 pool_name,
                                 sure='--yes-i-really-really-mean-it',
                                 body='json')
        raise e
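
As a usage note, the helper above would be driven roughly as in the sketch below; the pool name and placement-group counts are made-up values, and ceph_api stands for whatever REST wrapper exposes osd_pool_create and osd_set_pool_param as used above.

# Illustrative call only; the pool name and PG counts are assumptions.
try:
    osd_pool_create(ceph_api, 'cinder-volumes', pg_num=64, pgp_num=64)
except exception.CephPoolCreateFailure as e:
    LOG.error(_LE("Pool creation failed: %s") % str(e))
except exception.CephPoolRulesetFailure as e:
    # The helper already deleted the partially created pool before raising.
    LOG.error(_LE("Ruleset assignment failed: %s") % str(e))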
Example No. 8
 def auto_heal(self, health):
     if (health['health'] == constants.CEPH_HEALTH_WARN
             and (constants.CEPH_HEALTH_WARN_REQUIRE_JEWEL_OSDS_NOT_SET
                  in health['detail'])):
         try:
             upgrade = self.service.get_software_upgrade_status()
         except Exception as ex:
             LOG.warn(
                 _LW("Getting software upgrade status failed "
                     "with: %s. Skip auto-heal attempt "
                     "(will retry on next ceph status poll).") % str(ex))
             return
         state = upgrade.get('state')
         # surpress require_jewel_osds in case upgrade is
         # in progress but not completed or aborting
         if (not self.surpress_require_jewel_osds_warning
                 and (upgrade.get('from_version')
                      == constants.TITANIUM_SERVER_VERSION_16_10)
                 and state not in [
                     None, constants.UPGRADE_COMPLETED,
                     constants.UPGRADE_ABORTING,
                     constants.UPGRADE_ABORT_COMPLETING,
                     constants.UPGRADE_ABORTING_ROLLBACK
                 ]):
             LOG.info(_LI("Surpress require_jewel_osds health warning"))
             self.surpress_require_jewel_osds_warning = True
         # set require_jewel_osds in case upgrade is
         # not in progress or completed
         if (state in [None, constants.UPGRADE_COMPLETED]):
             LOG.warn(
                 _LW("No upgrade in progress or update completed "
                     "and require_jewel_osds health warning raised. "
                     "Set require_jewel_osds flag."))
             self.set_flag_require_jewel_osds()
             health = self._remove_require_jewel_osds_warning(health)
             LOG.info(_LI("Unsurpress require_jewel_osds health warning"))
             self.surpress_require_jewel_osds_warning = False
         # unsurpress require_jewel_osds in case upgrade
         # is aborting
         if (self.surpress_require_jewel_osds_warning and state in [
                 constants.UPGRADE_ABORTING,
                 constants.UPGRADE_ABORT_COMPLETING,
                 constants.UPGRADE_ABORTING_ROLLBACK
         ]):
             LOG.info(_LI("Unsurpress require_jewel_osds health warning"))
             self.surpress_require_jewel_osds_warning = False
     return health
Example No. 9
    def get_tiers_size(self, _):
        """Get the ceph cluster tier sizes.

        returns: a dict of sizes (in GB) by tier name
        """

        tiers_size = self.service.monitor.tiers_size
        LOG.debug(_LI("Ceph cluster tiers (size in GB): %s") % str(tiers_size))
        return tiers_size
Example No. 10
def osd_pool_set_quota(ceph_api, pool_name, max_bytes=0, max_objects=0):
    """Set the quota for an OSD pool_name
    Setting max_bytes or max_objects to 0 will disable that quota param
    :param pool_name:         OSD pool_name
    :param max_bytes:    maximum bytes for OSD pool_name
    :param max_objects:  maximum objects for OSD pool_name
    """

    # Update quota if needed
    prev_quota = osd_pool_get_quota(ceph_api, pool_name)
    if prev_quota["max_bytes"] != max_bytes:
        resp, b = ceph_api.osd_set_pool_quota(pool_name,
                                              'max_bytes',
                                              max_bytes,
                                              body='json')
        if resp.ok:
            LOG.info(
                _LI("Set OSD pool_name quota: "
                    "pool_name={}, max_bytes={}").format(pool_name, max_bytes))
        else:
            e = exception.CephPoolSetQuotaFailure(pool=pool_name,
                                                  name='max_bytes',
                                                  value=max_bytes,
                                                  reason=resp.reason)
            LOG.error(e)
            raise e
    if prev_quota["max_objects"] != max_objects:
        resp, b = ceph_api.osd_set_pool_quota(pool_name,
                                              'max_objects',
                                              max_objects,
                                              body='json')
        if resp.ok:
            LOG.info(
                _LI("Set OSD pool_name quota: "
                    "pool_name={}, max_objects={}").format(
                        pool_name, max_objects))
        else:
            e = exception.CephPoolSetQuotaFailure(pool=pool_name,
                                                  name='max_objects',
                                                  value=max_objects,
                                                  reason=resp.reason)
            LOG.error(e)
            raise e
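
A short usage sketch for the helper above, including how a quota is removed again by passing 0; the pool name and sizes are illustrative assumptions.

# Illustrative calls only; the pool name and limits are assumptions.
GiB = 1024 ** 3
osd_pool_set_quota(ceph_api, 'ephemeral', max_bytes=10 * GiB)  # cap the pool at 10 GiB
osd_pool_set_quota(ceph_api, 'ephemeral', max_bytes=0)         # 0 disables the byte quota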
Example No. 11
    def _set_upgrade(self, upgrade):
        state = upgrade.get('state')
        from_version = upgrade.get('from_version')
        if (state and state != constants.UPGRADE_COMPLETED
                and from_version == constants.TITANIUM_SERVER_VERSION_18_03):

            LOG.info(
                _LI("Wait for ceph upgrade to complete before monitoring cluster."
                    ))
            self.wait_for_upgrade_complete = True
Example No. 12
    def update_vol(self, volid, key, val):
        volkey = self.volumeroot + volid
        result = self.client.read(volkey)
        volval = json.loads(result.value)
        volval[key] = val
        volval = json.dumps(volval)
        result.value = volval
        self.client.update(result)

        LOG.info(_LI('Update key: %s to etcd, value is: %s'), volkey, volval)
Example No. 13
    def update_vol(self, volid, key, val):
        volkey = self.volumeroot + volid
        result = self.client.read(volkey)
        volval = json.loads(result.value)
        volval[key] = val
        volval = json.dumps(volval)
        result.value = volval
        self.client.update(result)

        LOG.info(_LI('Update key: %s to etcd, value is: %s'), volkey, volval)
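
Together with save_vol, _get_vol_byuuid and get_vol_byname shown elsewhere on this page, update_vol forms a small JSON-per-key volume store on top of etcd. A hedged sketch of the overall flow, with made-up connection parameters and volume data:

# Illustrative flow only; endpoint, certificates and volume data are assumptions.
etcd_util = util.EtcdUtil('127.0.0.1', 2379, None, None)  # ip, port, client cert, client key

vol = {'id': 'abcd-1234', 'display_name': 'myvol', 'size': 10, 'path_info': None}
etcd_util.save_vol(vol)                        # writes JSON under volumeroot + id
etcd_util.update_vol('abcd-1234', 'size', 20)  # read-modify-write of a single field
found = etcd_util.get_vol_byname('myvol')      # scan of volumeroot, match on display_name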
Example No. 14
 def cache_tiering_enable_cache(self, _, new_config, applied_config):
     LOG.info(_LI("Enabling cache"))
     try:
         self.service.cache_tiering.enable_cache(new_config, applied_config)
     except exception.CephManagerException as e:
         self.service.sysinv_conductor.call(
             {},
             'cache_tiering_enable_cache_complete',
             success=False,
             exception=str(e.message),
             new_config=new_config,
             applied_config=applied_config)
Example No. 15
    def get_primary_tier_size(self, _):
        """Get the ceph size for the primary tier.

        returns: an int for the size (in GB) of the tier
        """

        tiers_size = self.service.monitor.tiers_size
        primary_tier_size = tiers_size.get(
            self.service.monitor.primary_tier_name, 0)
        LOG.debug(_LI("Ceph cluster primary tier size: %s GB") %
                  str(primary_tier_size))
        return primary_tier_size
Example No. 16
    def ceph_poll_status(self):
        # get previous data every time in case:
        # * daemon restarted
        # * alarm was cleared manually but stored as raised in daemon
        self._get_current_alarms()
        if self.current_health_alarm:
            LOG.info(
                _LI("Current alarm: %s") %
                str(self.current_health_alarm.__dict__))

        # get ceph health
        health = self._get_health()
        LOG.info(
            _LI("Current Ceph health: "
                "%(health)s detail: %(detail)s") % health)

        health = self.filter_health_status(health)
        if health['health'] != constants.CEPH_HEALTH_OK:
            self._report_fault(health, fm_constants.FM_ALARM_ID_STORAGE_CEPH)
            self._report_alarm_osds_health()
        else:
            self._clear_fault(fm_constants.FM_ALARM_ID_STORAGE_CEPH)
            self.clear_all_major_critical()
Example No. 17
    def get_vol_byname(self, volname):
        volumes = self.client.read(self.volumeroot, recursive=True)
        LOG.info(_LI('Get volbyname: volname is %s'), volname)

        for child in volumes.children:
            if child.key != VOLUMEROOT:
                volmember = json.loads(child.value)
                vol = volmember['display_name']
                if vol.startswith(volname, 0, len(volname)):
                    if volmember['display_name'] == volname:
                        return volmember
                elif volmember['name'] == volname:
                    return volmember
        return None
Example No. 18
    def get_vol_byname(self, volname):
        volumes = self.client.read(self.volumeroot, recursive=True)
        LOG.info(_LI('Get volbyname: volname is %s'), volname)

        for child in volumes.children:
            if child.key != VOLUMEROOT:
                volmember = json.loads(child.value)
                vol = volmember['display_name']
                if vol.startswith(volname, 0, len(volname)):
                    if volmember['display_name'] == volname:
                        return volmember
                elif volmember['name'] == volname:
                    return volmember
        return None
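Example No. 19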
    def __init__(self, reactor, hpepluginconfig):
        """
        :param IReactorTime reactor: Reactor time interface implementation.
        :param Ihpepluginconfig : hpedefaultconfig configuration
        """
        LOG.info(_LI('Initialize Volume Plugin'))

        self._reactor = reactor
        self._hpepluginconfig = hpepluginconfig
        hpeplugin_driver = hpepluginconfig.hpedockerplugin_driver

        self.hpeplugin_driver = \
            importutils.import_object(hpeplugin_driver, self._hpepluginconfig)

        if self.hpeplugin_driver is None:
            msg = (_('hpeplugin_driver import driver failed'))
            LOG.error(msg)
            raise exception.HPEPluginNotInitializedException(reason=msg)

        try:
            self.hpeplugin_driver.do_setup()
            self.hpeplugin_driver.check_for_setup_error()
        except Exception as ex:
            msg = (_('hpeplugin_driver do_setup failed, error is: %s'),
                   six.text_type(ex))
            LOG.error(msg)
            raise exception.HPEPluginNotInitializedException(reason=msg)

        self._voltracker = {}
        self._path_info = []
        self._my_ip = netutils.get_my_ipv4()

        self._etcd = util.EtcdUtil(self._hpepluginconfig.host_etcd_ip_address,
                                   self._hpepluginconfig.host_etcd_port_number,
                                   self._hpepluginconfig.host_etcd_client_cert,
                                   self._hpepluginconfig.host_etcd_client_key)

        # TODO: make device_scan_attempts configurable
        # see nova/virt/libvirt/volume/iscsi.py
        root_helper = 'sudo'
        self.use_multipath = self._hpepluginconfig.use_multipath
        self.enforce_multipath = self._hpepluginconfig.enforce_multipath
        self.connector = connector.InitiatorConnector.factory(
            'ISCSI',
            root_helper,
            use_multipath=self.use_multipath,
            device_scan_attempts=5,
            transport='default')
Example No. 20
def osd_pool_delete(ceph_api, pool_name):
    """Delete an osd pool
    :param pool_name:  pool name
    """
    response, body = ceph_api.osd_pool_delete(
        pool_name,
        pool_name,
        sure='--yes-i-really-really-mean-it',
        body='json')
    if response.ok:
        LOG.info(_LI("Deleted OSD pool {}").format(pool_name))
    else:
        e = exception.CephPoolDeleteFailure(name=pool_name,
                                            reason=response.reason)
        LOG.warn(e)
        raise e
Example No. 21
 def rados_cache_flush_evict_all(pool):
     backing_pool = pool['pool_name']
     cache_pool = backing_pool + '-cache'
     try:
         subprocess.check_call(
             ['/usr/bin/rados', '-p', cache_pool, 'cache-flush-evict-all'])
         LOG.info(
             _LI("Flushed OSD cache pool:"
                 "cache_pool={}").format(cache_pool))
     except subprocess.CalledProcessError as e:
         _e = exception.CephCacheFlushFailure(cache_pool=cache_pool,
                                              return_code=str(e.returncode),
                                              cmd=" ".join(e.cmd),
                                              output=e.output)
         LOG.warn(_e)
         raise _e
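Example No. 22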
    def __init__(self, reactor, hpepluginconfig):
        """
        :param IReactorTime reactor: Reactor time interface implementation.
        :param Ihpepluginconfig : hpedefaultconfig configuration
        """
        LOG.info(_LI('Initialize Volume Plugin'))

        self._reactor = reactor
        self._hpepluginconfig = hpepluginconfig
        hpeplugin_driver = hpepluginconfig.hpedockerplugin_driver

        self.hpeplugin_driver = \
            importutils.import_object(hpeplugin_driver, self._hpepluginconfig)

        if self.hpeplugin_driver is None:
            msg = (_('hpeplugin_driver import driver failed'))
            LOG.error(msg)
            raise exception.HPEPluginNotInitializedException(reason=msg)

        try:
            self.hpeplugin_driver.do_setup()
            self.hpeplugin_driver.check_for_setup_error()
        except Exception as ex:
            msg = (_('hpeplugin_driver do_setup failed, error is: %s'),
                   six.text_type(ex))
            LOG.error(msg)
            raise exception.HPEPluginNotInitializedException(reason=msg)

        self._voltracker = {}
        self._path_info = []
        self._my_ip = netutils.get_my_ipv4()

        self._etcd = util.EtcdUtil(
            self._hpepluginconfig.host_etcd_ip_address,
            self._hpepluginconfig.host_etcd_port_number,
            self._hpepluginconfig.host_etcd_client_cert,
            self._hpepluginconfig.host_etcd_client_key)

        # TODO: make device_scan_attempts configurable
        # see nova/virt/libvirt/volume/iscsi.py
        root_helper = 'sudo'
        self.use_multipath = self._hpepluginconfig.use_multipath
        self.enforce_multipath = self._hpepluginconfig.enforce_multipath
        self.connector = connector.InitiatorConnector.factory(
            'ISCSI', root_helper, use_multipath=self.use_multipath,
            device_scan_attempts=5, transport='default')
Example No. 23
 def set_flag_require_jewel_osds(self):
     try:
         response, body = self.service.ceph_api.osd_set_key(
             constants.CEPH_FLAG_REQUIRE_JEWEL_OSDS, body='json')
         LOG.info(_LI("Set require_jewel_osds flag"))
     except IOError as e:
         raise exception.CephApiFailure(call="osd_set_key",
                                        reason=e.message)
     else:
         if not response.ok:
             raise exception.CephSetKeyFailure(
                 flag=constants.CEPH_FLAG_REQUIRE_JEWEL_OSDS,
                 extra=_("needed to complete upgrade to Jewel"),
                 response_status_code=response.status_code,
                 response_reason=response.reason,
                 status=body.get('status'),
                 output=body.get('output'))
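Example No. 24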
    def volumedriver_remove(self, name):
        """
        Remove a Docker volume.

        :param unicode name: The name of the volume.

        :return: Result indicating success.
        """
        contents = json.loads(name.content.getvalue())
        volname = contents['Name']

        # Only 1 node in a multinode cluster can try to remove the volume.
        # Grab lock for volume name. If lock is inuse, just return with no
        # error.
        self._lock_volume(volname, 'Remove')

        vol = self._etcd.get_vol_byname(volname)
        if vol is None:
            # Just log an error, but don't fail the docker rm command
            msg = (_LE('Volume remove name not found %s'), volname)
            LOG.error(msg)
            self._unlock_volume(volname)
            return json.dumps({u"Err": ''})

        try:
            self.hpeplugin_driver.delete_volume(vol)
            LOG.info(_LI('volume: %(name)s,'
                         'was successfully deleted'), {'name': volname})
        except Exception as ex:
            msg = (_LE('Err: Failed to remove volume %s, error is %s'),
                   volname, six.text_type(ex))
            LOG.error(msg)
            self._unlock_volume(volname)
            raise exception.HPEPluginRemoveException(reason=msg)

        try:
            self._etcd.delete_vol(vol)
        except KeyError:
            msg = (_LW('Warning: Failed to delete volume key: %s from '
                       'etcd due to KeyError'), volname)
            LOG.warning(msg)
            pass

        self._unlock_volume(volname)
        return json.dumps({u"Err": ''})
    def volumedriver_remove(self, name):
        """
        Remove a Docker volume.

        :param unicode name: The name of the volume.

        :return: Result indicating success.
        """
        contents = json.loads(name.content.getvalue())
        volname = contents['Name']

        # Only 1 node in a multinode cluster can try to remove the volume.
        # Grab lock for volume name. If lock is inuse, just return with no
        # error.
        self._lock_volume(volname, 'Remove')

        vol = self._etcd.get_vol_byname(volname)
        if vol is None:
            # Just log an error, but don't fail the docker rm command
            msg = (_LE('Volume remove name not found %s'), volname)
            LOG.error(msg)
            self._unlock_volume(volname)
            return json.dumps({u"Err": ''})

        try:
            self.hpeplugin_driver.delete_volume(vol)
            LOG.info(_LI('volume: %(name)s,' 'was successfully deleted'),
                     {'name': volname})
        except Exception as ex:
            msg = (_LE('Err: Failed to remove volume %s, error is %s'),
                   volname, six.text_type(ex))
            LOG.error(msg)
            self._unlock_volume(volname)
            raise exception.HPEPluginRemoveException(reason=msg)

        try:
            self._etcd.delete_vol(vol)
        except KeyError:
            msg = (_LW('Warning: Failed to delete volume key: %s from '
                       'etcd due to KeyError'), volname)
            LOG.warning(msg)
            pass

        self._unlock_volume(volname)
        return json.dumps({u"Err": ''})
Example No. 26
 def cache_overlay_delete(self, pool):
     backing_pool = pool['pool_name']
     cache_pool = pool['pool_name']
     response, body = self.service.ceph_api.osd_tier_remove_overlay(
         backing_pool, body='json')
     if response.ok:
         LOG.info(
             _LI("Removed OSD tier overlay: "
                 "backing_pool={}").format(backing_pool))
     else:
         e = exception.CephCacheDeleteOverlayFailure(
             backing_pool=backing_pool,
             cache_pool=cache_pool,
             response_status_code=response.status_code,
             response_reason=response.reason,
             status=body.get('status'),
             output=body.get('output'))
         LOG.warn(e)
         raise e
Example No. 27
    def ceph_get_fsid(self):
        # Check whether an alarm has already been raised
        self._get_current_alarms()
        if self.current_health_alarm:
            LOG.info(
                _LI("Current alarm: %s") %
                str(self.current_health_alarm.__dict__))

        fsid = self._get_fsid()
        if not fsid:
            # Raise alarm - it will not have an entity_instance_id
            self._report_fault(
                {
                    'health': constants.CEPH_HEALTH_DOWN,
                    'detail': 'Ceph cluster is down.'
                }, fm_constants.FM_ALARM_ID_STORAGE_CEPH)
        else:
            # Clear alarm with no entity_instance_id
            self._clear_fault(fm_constants.FM_ALARM_ID_STORAGE_CEPH)
            self.service.entity_instance_id = 'cluster=%s' % fsid
Example No. 28
 def cache_mode_set(self, pool, mode):
     backing_pool = pool['pool_name']
     cache_pool = backing_pool + '-cache'
     response, body = self.service.ceph_api.osd_tier_cachemode(cache_pool,
                                                               mode,
                                                               body='json')
     if response.ok:
         LOG.info(
             _LI("Set OSD tier cache mode: "
                 "cache_pool={}, mode={}").format(cache_pool, mode))
     else:
         e = exception.CephCacheSetModeFailure(
             cache_pool=cache_pool,
             mode=mode,
             response_status_code=response.status_code,
             response_reason=response.reason,
             status=body.get('status'),
             output=body.get('output'))
         LOG.warn(e)
         raise e
Example No. 29
 def cache_tier_add(self, pool):
     backing_pool = pool['pool_name']
     cache_pool = backing_pool + '-cache'
     response, body = self.service.ceph_api.osd_tier_add(
         backing_pool,
         cache_pool,
         force_nonempty="--force-nonempty",
         body='json')
     if response.ok:
         LOG.info(
             _LI("Added OSD tier: "
                 "backing_pool={}, cache_pool={}").format(
                     backing_pool, cache_pool))
     else:
         e = exception.CephPoolAddTierFailure(
             backing_pool=backing_pool,
             cache_pool=cache_pool,
             response_status_code=response.status_code,
             response_reason=response.reason,
             status=body.get('status'),
             output=body.get('output'))
         LOG.warn(e)
         raise e
Example No. 30
    def delete_vol(self, vol):
        volkey = self.volumeroot + vol['id']

        self.client.delete(volkey)
        LOG.info(_LI('Deleted key: %s from etcd'), volkey)
Example No. 31
    def save_vol(self, vol):
        volkey = self.volumeroot + vol['id']
        volval = json.dumps(vol)

        self.client.write(volkey, volval)
        LOG.info(_LI('Write key: %s to etcd, value is: %s'), volkey, volval)
Example No. 32
    def delete_vol(self, vol):
        volkey = self.volumeroot + vol['id']

        self.client.delete(volkey)
        LOG.info(_LI('Deleted key: %s from etcd'), volkey)
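Example No. 33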
    def volumedriver_unmount(self, name):
        """
        The Docker container is no longer using the given volume,
        so unmount it.
        NOTE: Since Docker will automatically call Unmount if the Mount
        fails, make sure we properly handle partially completed Mounts.

        :param unicode name: The name of the volume.
        :return: Result indicating success.
        """
        LOG.info(_LI('In volumedriver_unmount'))
        contents = json.loads(name.content.getvalue())
        volname = contents['Name']
        vol = self._etcd.get_vol_byname(volname)
        if vol is not None:
            volid = vol['id']
        else:
            msg = (_LE('Volume unmount name not found %s'), volname)
            LOG.error(msg)
            raise exception.HPEPluginUMountException(reason=msg)

        vol_mount = DEFAULT_MOUNT_VOLUME
        if ('Opts' in contents and contents['Opts']
                and 'mount-volume' in contents['Opts']):
            vol_mount = str(contents['Opts']['mount-volume'])

        path_info = self._etcd.get_vol_path_info(volname)
        if path_info:
            path_name = path_info['path']
            connection_info = path_info['connection_info']
            mount_dir = path_info['mount_dir']
        else:
            msg = (_LE('Volume unmount path info not found %s'), volname)
            LOG.error(msg)
            raise exception.HPEPluginUMountException(reason=msg)

        # Get connector info from OS Brick
        # TODO: retrieve use_multipath and enforce_multipath from config file
        root_helper = 'sudo'

        connector_info = connector.get_connector_properties(
            root_helper,
            self._my_ip,
            multipath=self.use_multipath,
            enforce_multipath=self.enforce_multipath)

        # Determine if we need to unmount a previously mounted volume
        if vol_mount is DEFAULT_MOUNT_VOLUME:
            # unmount directory
            fileutil.umount_dir(mount_dir)
            # remove directory
            fileutil.remove_dir(mount_dir)

        # We're deferring the execution of the disconnect_volume as it can take
        # substantial time (over 2 minutes) to cleanup the iscsi files
        if connection_info:
            LOG.info(_LI('call os brick to disconnect volume'))
            d = threads.deferToThread(self.connector.disconnect_volume,
                                      connection_info['data'], None)
            d.addCallbacks(self.disconnect_volume_callback,
                           self.disconnect_volume_error_callback)

        try:
            # Call driver to terminate the connection
            self.hpeplugin_driver.terminate_connection(vol, connector_info)
            LOG.info(
                _LI('connection_info: %(connection_info)s, '
                    'was successfully terminated'),
                {'connection_info': json.dumps(connection_info)})
        except Exception as ex:
            msg = (_LE('connection info termination failed %s'),
                   six.text_type(ex))
            LOG.error(msg)
            # Not much we can do here, so just continue on with unmount
            # We need to ensure we update etcd path_info so the stale
            # path does not stay around
            # raise exception.HPEPluginUMountException(reason=msg)

        # TODO: Create path_info list as we can mount the volume to multiple
        # hosts at the same time.
        self._etcd.update_vol(volid, 'path_info', None)

        LOG.info(
            _LI('path for volume: %(name)s, was successfully removed: '
                '%(path_name)s'), {
                    'name': volname,
                    'path_name': path_name
                })

        response = json.dumps({u"Err": ''})
        return response
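Example No. 34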
 def plugin_activate(self, ignore_body=True):
     """
     Return which Docker plugin APIs this object supports.
     """
     LOG.info(_LI('In Plugin Activate'))
     return json.dumps({u"Implements": [u"VolumeDriver"]})
 def disconnect_volume_error_callback(self, connector_info):
     LOG.info(
         _LI('In disconnect_volume_error_callback: '
             'connector info is %s'), json.dumps(connector_info))
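
The disconnect_volume_callback passed to addCallbacks in the unmount handlers is not shown on this page; a hypothetical counterpart, mirroring the error callback above, might look like this:

 def disconnect_volume_callback(self, connector_info):
     # Hypothetical success callback; not taken from the examples on this page.
     LOG.info(
         _LI('In disconnect_volume_callback: '
             'connector info is %s'), json.dumps(connector_info))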
Example No. 36
    def do_disable_cache(self, new_config, applied_config, lock_ownership):
        LOG.info(
            _LI("cache_tiering_disable_cache: "
                "new_config={}, applied_config={}").format(
                    new_config, applied_config))
        with lock_ownership():
            success = False
            _exception = None
            try:
                self.config_desired.cache_enabled = False
                for pool in CEPH_POOLS:
                    if (pool['pool_name']
                            == constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL
                            or pool['pool_name']
                            == constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER):
                        object_pool_name = \
                          self.service.monitor._get_object_pool_name()
                        pool['pool_name'] = object_pool_name

                    with self.ignore_ceph_failure():
                        self.cache_mode_set(pool, 'forward')

                for pool in CEPH_POOLS:
                    if (pool['pool_name']
                            == constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL
                            or pool['pool_name']
                            == constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER):
                        object_pool_name = \
                          self.service.monitor._get_object_pool_name()
                        pool['pool_name'] = object_pool_name

                    retries_left = 3
                    while True:
                        try:
                            self.cache_flush(pool)
                            break
                        except exception.CephCacheFlushFailure:
                            retries_left -= 1
                            if not retries_left:
                                # give up
                                break
                            else:
                                time.sleep(1)
                for pool in CEPH_POOLS:
                    if (pool['pool_name']
                            == constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL
                            or pool['pool_name']
                            == constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER):
                        object_pool_name = \
                          self.service.monitor._get_object_pool_name()
                        pool['pool_name'] = object_pool_name

                    with self.ignore_ceph_failure():
                        self.cache_overlay_delete(pool)
                        self.cache_tier_remove(pool)
                for pool in CEPH_POOLS:
                    if (pool['pool_name']
                            == constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL
                            or pool['pool_name']
                            == constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER):
                        object_pool_name = \
                          self.service.monitor._get_object_pool_name()
                        pool['pool_name'] = object_pool_name

                    with self.ignore_ceph_failure():
                        self.cache_pool_delete(pool)
                success = True
            except Exception as e:
                LOG.warn(
                    _LE('Failed to disable cache: reason=%s') %
                    traceback.format_exc())
                _exception = str(e)
            finally:
                self.service.monitor.monitor_check_cache_tier(False)
                if success:
                    self.config_desired.cache_enabled = False
                    self.config_applied.cache_enabled = False
                self.service.sysinv_conductor.call(
                    {},
                    'cache_tiering_disable_cache_complete',
                    success=success,
                    exception=_exception,
                    new_config=new_config.to_dict(),
                    applied_config=applied_config.to_dict())
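Example No. 37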
    def volumedriver_unmount(self, name):
        """
        The Docker container is no longer using the given volume,
        so unmount it.
        NOTE: Since Docker will automatically call Unmount if the Mount
        fails, make sure we properly handle partially completed Mounts.

        :param unicode name: The name of the volume.
        :return: Result indicating success.
        """
        LOG.info(_LI('In volumedriver_unmount'))
        contents = json.loads(name.content.getvalue())
        volname = contents['Name']
        vol = self._etcd.get_vol_byname(volname)
        if vol is not None:
            volid = vol['id']
        else:
            msg = (_LE('Volume unmount name not found %s'), volname)
            LOG.error(msg)
            raise exception.HPEPluginUMountException(reason=msg)

        path_info = self._etcd.get_vol_path_info(volname)
        if path_info:
            path_name = path_info['path']
            connection_info = path_info['connection_info']
            mount_dir = path_info['mount_dir']
        else:
            msg = (_LE('Volume unmount path info not found %s'), volname)
            LOG.error(msg)
            raise exception.HPEPluginUMountException(reason=msg)

        # Get connector info from OS Brick
        # TODO: retrieve use_multipath and enforce_multipath from config file
        root_helper = 'sudo'
        use_multipath = False

        connector_info = connector.get_connector_properties(
            root_helper, self._my_ip, use_multipath, enforce_multipath=False)
        # unmount directory
        fileutil.umount_dir(mount_dir)
        # remove directory
        fileutil.remove_dir(mount_dir)

        try:
            # Call driver to terminate the connection
            self.hpeplugin_driver.terminate_connection(vol, connector_info)
            LOG.info(_LI('connection_info: %(connection_info)s, '
                         'was successfully terminated'),
                     {'connection_info': json.dumps(connection_info)})
        except Exception as ex:
            msg = (_LE('connection info termination failed %s'),
                   six.text_type(ex))
            LOG.error(msg)
            raise exception.HPEPluginUMountException(reason=msg)

        # We're deferring the execution of the disconnect_volume as it can take
        # substantial time (over 2 minutes) to cleanup the iscsi files
        if connection_info:
            LOG.info(_LI('call os brick to disconnect volume'))
            d = threads.deferToThread(self.connector.disconnect_volume,
                                      connection_info['data'], None)
            d.addCallbacks(self.disconnect_volume_callback,
                           self.disconnect_volume_error_callback)

        # TODO(leeantho) Without this sleep the volume is sometimes not
        # removed after the unmount. There must be a different way to fix
        # the issue?
        time.sleep(1)

        # TODO: Create path_info list as we can mount the volume to multiple
        # hosts at the same time.
        self._etcd.update_vol(volid, 'path_info', None)

        LOG.info(_LI('path for volume: %(name)s, was successfully removed: '
                     '%(path_name)s'), {'name': volname,
                                        'path_name': path_name})

        response = json.dumps({u"Err": ''})
        return response
Example No. 38
    def save_vol(self, vol):
        volkey = self.volumeroot + vol['id']
        volval = json.dumps(vol)

        self.client.write(volkey, volval)
        LOG.info(_LI('Write key: %s to etcd, value is: %s'), volkey, volval)
 def plugin_activate(self, ignore_body=True):
     """
     Return which Docker plugin APIs this object supports.
     """
     LOG.info(_LI('In Plugin Activate'))
     return json.dumps({u"Implements": [u"VolumeDriver"]})
 def disconnect_volume_error_callback(self, connector_info):
     LOG.info(_LI('In disconnect_volume_error_callback: '
                  'connector info is %s'), json.dumps(connector_info))
                   six.text_type(ex))
            LOG.error(msg)
            raise exception.HPEPluginStartPluginException(reason=msg)

        # Set Logging level
        logging_level = hpedefaultconfig.logging
        setup_logging('hpe_storage_api', logging_level)

        self._create_listening_directory(PLUGIN_PATH.parent())
        endpoint = serverFromString(self._reactor, "unix:{}:mode=600".
                                    format(PLUGIN_PATH.path))
        servicename = StreamServerEndpointService(endpoint, Site(
            VolumePlugin(self._reactor, hpedefaultconfig).app.resource()))
        return servicename

hpedockerplugin = HPEDockerPluginService()

# this will hold the services that combine to form the plugin server
top_service = service.MultiService()

hpepluginservice = hpedockerplugin.setupservice()
hpepluginservice.setServiceParent(top_service)

# this variable has to be named 'application'
application = service.Application("hpedockerplugin")

# this hooks the collection we made to the application
top_service.setServiceParent(application)

LOG.info(_LI('HPE Docker Volume Plugin Successfully Started'))
    def volumedriver_unmount(self, name):
        """
        The Docker container is no longer using the given volume,
        so unmount it.
        NOTE: Since Docker will automatically call Unmount if the Mount
        fails, make sure we properly handle partially completed Mounts.

        :param unicode name: The name of the volume.
        :return: Result indicating success.
        """
        LOG.info(_LI('In volumedriver_unmount'))
        contents = json.loads(name.content.getvalue())
        volname = contents['Name']
        vol = self._etcd.get_vol_byname(volname)
        if vol is not None:
            volid = vol['id']
        else:
            msg = (_LE('Volume unmount name not found %s'), volname)
            LOG.error(msg)
            raise exception.HPEPluginUMountException(reason=msg)

        vol_mount = DEFAULT_MOUNT_VOLUME
        if ('Opts' in contents and contents['Opts'] and
                'mount-volume' in contents['Opts']):
            vol_mount = str(contents['Opts']['mount-volume'])

        path_info = self._etcd.get_vol_path_info(volname)
        if path_info:
            path_name = path_info['path']
            connection_info = path_info['connection_info']
            mount_dir = path_info['mount_dir']
        else:
            msg = (_LE('Volume unmount path info not found %s'), volname)
            LOG.error(msg)
            raise exception.HPEPluginUMountException(reason=msg)

        # Get connector info from OS Brick
        # TODO: retrieve use_multipath and enforce_multipath from config file
        root_helper = 'sudo'

        connector_info = connector.get_connector_properties(
            root_helper, self._my_ip, multipath=self.use_multipath,
            enforce_multipath=self.enforce_multipath)

        # Determine if we need to unmount a previously mounted volume
        if vol_mount is DEFAULT_MOUNT_VOLUME:
            # unmount directory
            fileutil.umount_dir(mount_dir)
            # remove directory
            fileutil.remove_dir(mount_dir)

        # We're deferring the execution of the disconnect_volume as it can take
        # substantial time (over 2 minutes) to cleanup the iscsi files
        if connection_info:
            LOG.info(_LI('call os brick to disconnect volume'))
            d = threads.deferToThread(self.connector.disconnect_volume,
                                      connection_info['data'], None)
            d.addCallbacks(self.disconnect_volume_callback,
                           self.disconnect_volume_error_callback)

        try:
            # Call driver to terminate the connection
            self.hpeplugin_driver.terminate_connection(vol, connector_info)
            LOG.info(_LI('connection_info: %(connection_info)s, '
                         'was successfully terminated'),
                     {'connection_info': json.dumps(connection_info)})
        except Exception as ex:
            msg = (_LE('connection info termination failed %s'),
                   six.text_type(ex))
            LOG.error(msg)
            # Not much we can do here, so just continue on with unmount
            # We need to ensure we update etcd path_info so the stale
            # path does not stay around
            # raise exception.HPEPluginUMountException(reason=msg)

        # TODO: Create path_info list as we can mount the volume to multiple
        # hosts at the same time.
        self._etcd.update_vol(volid, 'path_info', None)

        LOG.info(_LI('path for volume: %(name)s, was successfully removed: '
                     '%(path_name)s'), {'name': volname,
                                        'path_name': path_name})

        response = json.dumps({u"Err": ''})
        return response
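
Note that this last variant of volumedriver_unmount honours an optional mount-volume entry under Opts: supplying an explicit value will in practice fail the identity check vol_mount is DEFAULT_MOUNT_VOLUME, so the local directory unmount and removal are skipped. A hedged sketch of the request body it parses, with made-up values:

# Illustrative request body only; the volume name and option value are assumptions.
unmount_request = json.dumps({
    u"Name": u"myvol",
    u"Opts": {u"mount-volume": u"False"},  # presence of this option skips umount_dir/remove_dir
})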