    def _validation(self):
        """
        Check that the host id is not already in use
        """
        if self.storageType in (
            ohostedcons.VDSMConstants.ISCSI_DOMAIN,
            ohostedcons.VDSMConstants.FC_DOMAIN,
        ):
            # For iSCSI/FC we need to connect the storage pool
            # so that /rhev gets populated.
            self._storagePoolConnection()
            # We also need to connect the metadata LVs:
            # prepare the backend interface from the UUIDs
            # of the already-created storage volumes.
            lockspace = self.environment[
                ohostedcons.SanlockEnv.LOCKSPACE_NAME
            ]
            activate_devices = {
                lockspace + '.lockspace': (
                    storage_backends.VdsmBackend.Device(
                        image_uuid=self.environment[
                            ohostedcons.StorageEnv.
                            LOCKSPACE_IMAGE_UUID
                        ],
                        volume_uuid=self.environment[
                            ohostedcons.StorageEnv.
                            LOCKSPACE_VOLUME_UUID
                        ],
                    )
                ),
                lockspace + '.metadata': (
                    storage_backends.VdsmBackend.Device(
                        image_uuid=self.environment[
                            ohostedcons.StorageEnv.
                            METADATA_IMAGE_UUID
                        ],
                        volume_uuid=self.environment[
                            ohostedcons.StorageEnv.
                            METADATA_VOLUME_UUID
                        ],
                    )
                ),
            }
            backend = storage_backends.VdsmBackend(
                sd_uuid=self.environment[
                    ohostedcons.StorageEnv.SD_UUID
                ],
                sp_uuid=self.environment[
                    ohostedcons.StorageEnv.SP_UUID
                ],
                dom_type=self.environment[
                    ohostedcons.StorageEnv.DOMAIN_TYPE
                ],
                **activate_devices
            )
            with ohostedutil.VirtUserContext(
                self.environment,
                # umask 007
                umask=stat.S_IRWXO
            ):
                backend.connect()
        all_host_stats = {}
        with ohostedutil.VirtUserContext(
            environment=self.environment,
            umask=stat.S_IWGRP | stat.S_IWOTH,
        ):
            ha_cli = client.HAClient()
            all_host_stats = ha_cli.get_all_host_stats_direct(
                dom_type=self.environment[
                    ohostedcons.StorageEnv.DOMAIN_TYPE
                ],
                sd_uuid=self.environment[
                    ohostedcons.StorageEnv.SD_UUID
                ],
                service_type=self.environment[
                    ohostedcons.SanlockEnv.LOCKSPACE_NAME
                ] + ".metadata",
            )
        if (
            int(
                self.environment[ohostedcons.StorageEnv.HOST_ID]
            ) in all_host_stats.keys() and
            not self._re_deploying_host()
        ):
            raise RuntimeError(
                _('Invalid value for Host ID: already used')
            )
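The check above relies on ha_cli.get_all_host_stats_direct returning a mapping keyed by numeric host id, one entry per host already registered in the metadata volume; that shape is implied by the int(...) in all_host_stats.keys() test. A minimal sketch of the collision check, with the dict contents invented purely for illustration:

# Hypothetical stats, shaped like the HA client's return value:
# one entry per host id already present in the metadata area.
all_host_stats = {
    1: {'hostname': 'host-1.example.com'},
    2: {'hostname': 'host-2.example.com'},
}

candidate_id = 2
# Host id 2 is taken, so unless this host is being re-deployed
# under the same id, setup must refuse it.
if candidate_id in all_host_stats:
    raise RuntimeError('Invalid value for Host ID: already used')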
    def _misc(self):
        """
        Here the storage pool is connected and activated.
        Pass needed configuration to HA VdsmBackend for initializing
        the metadata and lockspace volumes.
        """
        self.logger.info(_('Verifying sanlock lockspace initialization'))
        self.services.state(
            name=self.environment[ohostedcons.SanlockEnv.SANLOCK_SERVICE],
            state=True,
        )

        dom_type = self.environment[ohostedcons.StorageEnv.DOMAIN_TYPE]
        lockspace = self.environment[ohostedcons.SanlockEnv.LOCKSPACE_NAME]
        host_id = self.environment[ohostedcons.StorageEnv.HOST_ID]

        sp_uuid = self.environment[ohostedcons.StorageEnv.SP_UUID]
        if self.environment[ohostedcons.Upgrade.UPGRADE_CREATE_LM_VOLUMES]:
            cli = self.environment[ohostedcons.VDSMEnv.VDS_CLI]
            res = cli.getStorageDomainInfo(storagedomainID=self.environment[
                ohostedcons.StorageEnv.SD_UUID])
            self.logger.debug(res)
            if 'status' not in res or res['status']['code'] != 0:
                raise RuntimeError(
                    _('Failed getting storage domain info: {m}').format(
                        m=res['status']['message'], ))
            sp_uuid = res['pool'][0]
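            # getStorageDomainInfo lists the pool(s) the domain is
            # attached to under the 'pool' key; taking the first entry
            # assumes the hosted-engine domain belongs to a single pool.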

        # Prepare the Backend interface
        # - this supports nfs, iSCSI and Gluster automatically
        activate_devices = {
            lockspace + '.lockspace': None,  # created by backend
            lockspace + '.metadata': None,  # created by backend
        }
        backend = storage_backends.VdsmBackend(
            sd_uuid=self.environment[ohostedcons.StorageEnv.SD_UUID],
            sp_uuid=sp_uuid,
            dom_type=dom_type,
            **activate_devices)
        backend.set_external_logger(self.logger)

        # Compute the size needed to store metadata for all hosts
        # and for the global cluster state
        md_size = (ohostedcons.Const.METADATA_CHUNK_SIZE *
                   (ohostedcons.Const.MAX_HOST_ID + 1))
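        # A worked example, assuming the usual constant values
        # (METADATA_CHUNK_SIZE == 4096 and MAX_HOST_ID == 250; verify
        # against your ohostedcons): 4096 * 251 bytes, about 1 MiB --
        # one chunk per possible host id plus one for the global
        # cluster state.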

        with ohostedutil.VirtUserContext(
                self.environment,
                # umask 007
                umask=stat.S_IRWXO):
            # Create storage for the hosted engine metadata and the
            # sanlock lockspace; 1 MiB is good for 2000 hosts when the
            # block size is 512 B
            created = backend.create({
                lockspace + '.lockspace':
                1024 * 1024 * backend.blocksize / 512,
                lockspace + '.metadata':
                md_size,
            })
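            # The lockspace size above scales with the block size:
            # for 512 B blocks the expression yields exactly 1 MiB,
            # for 4 KiB blocks 8 MiB, preserving one sector per
            # host id for sanlock's delta leases.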

            # Get UUIDs of the storage
            metadata_device = backend.get_device(lockspace + '.metadata')
            self.environment[
                ohostedcons.StorageEnv.
                METADATA_VOLUME_UUID] = metadata_device.volume_uuid
            self.environment[ohostedcons.StorageEnv.
                             METADATA_IMAGE_UUID] = metadata_device.image_uuid

            lockspace_device = backend.get_device(lockspace + '.lockspace')
            self.environment[
                ohostedcons.StorageEnv.
                LOCKSPACE_VOLUME_UUID] = lockspace_device.volume_uuid
            self.environment[
                ohostedcons.StorageEnv.
                LOCKSPACE_IMAGE_UUID] = lockspace_device.image_uuid
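            # Storing the UUIDs in the environment lets later stages
            # (e.g. _validation above, which rebuilds the backend from
            # the LOCKSPACE_*/METADATA_* UUIDs) re-activate the same
            # volumes.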

            # For LV-based storage (like iSCSI) this creates symlinks
            # under /rhev/..; for NFS it does nothing (the real files
            # already live under /rhev/..)
            backend.connect()

            # Get the path to sanlock lockspace area
            lease_file, offset = backend.filename(lockspace + '.lockspace')

            agent_data_dir = os.path.dirname(lease_file)

            stat_info = os.stat(agent_data_dir)
            # Only change ownership when it is not already owned by
            # vdsm; on NFS the chown is not needed and would not work
            if stat_info.st_uid != self.environment[
                    ohostedcons.VDSMEnv.VDSM_UID]:
                os.chown(agent_data_dir,
                         self.environment[ohostedcons.VDSMEnv.VDSM_UID],
                         self.environment[ohostedcons.VDSMEnv.KVM_GID])
            # Update permissions on the lockspace directory to 0750
            # (S_IRWXU | S_IRGRP | S_IXGRP)
            os.chmod(agent_data_dir,
                     stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP)

            self.logger.debug(('Ensuring lease for lockspace {lockspace}, '
                               'host id {host_id} '
                               'is acquired (file: {lease_file})').format(
                                   lockspace=lockspace,
                                   host_id=host_id,
                                   lease_file=lease_file,
                               ))

        # Reinitialize the sanlock lockspace
        # if it was newly created or updated
        if (lockspace + '.lockspace') in created:
            sanlock.write_lockspace(lockspace=lockspace,
                                    path=lease_file,
                                    offset=offset)
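        # write_lockspace (re)formats the delta-lease area in place,
        # which would invalidate leases held by running hosts -- hence
        # the guard above limiting it to volumes backend.create just
        # created or updated.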
        backend.disconnect()
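For context, write_lockspace only formats the on-disk lease area; joining the lockspace under a specific host id happens later (in hosted-engine that is the HA agent's job, not this plugin's). A minimal sketch of the pairing with the sanlock binding, using invented names and paths:

import sanlock

LOCKSPACE = 'hosted-engine'               # hypothetical name
LEASE_FILE = '/run/example/he.lockspace'  # hypothetical path

# Format the delta-lease area, as _misc does for a fresh volume.
sanlock.write_lockspace(lockspace=LOCKSPACE, path=LEASE_FILE, offset=0)

# Join with a unique host id; the sanlock daemon then renews the lease.
sanlock.add_lockspace(LOCKSPACE, 1, LEASE_FILE, offset=0)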
Example #3
    def _validation(self):
        """
        Check that the host id is not already in use
        """
        if self.storageType in (
                ohostedcons.VDSMConstants.ISCSI_DOMAIN,
                ohostedcons.VDSMConstants.FC_DOMAIN,
        ):
            # For iSCSI/FC we need to explicitly call getStorageDomainStats
            # to create/refresh the storage domain directory tree.
            result = self.cli.getStorageDomainStats(
                self.environment[ohostedcons.StorageEnv.SD_UUID], )
            self.logger.debug('getStorageDomainStats: {result}'.format(
                result=result, ))
            if result['status']['code'] != 0:
                raise RuntimeError(
                    'Unable to get storage domain stats: {message}'.format(
                        message=result['status']['message'], ))

            # We need to connect metadata LVMs
            # Prepare the Backend interface
            # Get UUIDs of the storage
            lockspace = self.environment[ohostedcons.SanlockEnv.LOCKSPACE_NAME]
            activate_devices = {
                lockspace + '.lockspace': (storage_backends.VdsmBackend.Device(
                    image_uuid=self.environment[
                        ohostedcons.StorageEnv.LOCKSPACE_IMAGE_UUID],
                    volume_uuid=self.environment[
                        ohostedcons.StorageEnv.LOCKSPACE_VOLUME_UUID],
                )),
                lockspace + '.metadata': (storage_backends.VdsmBackend.Device(
                    image_uuid=self.environment[
                        ohostedcons.StorageEnv.METADATA_IMAGE_UUID],
                    volume_uuid=self.environment[
                        ohostedcons.StorageEnv.METADATA_VOLUME_UUID],
                )),
            }
            backend = storage_backends.VdsmBackend(
                sd_uuid=self.environment[ohostedcons.StorageEnv.SD_UUID],
                sp_uuid=self.environment[ohostedcons.StorageEnv.SP_UUID],
                dom_type=self.environment[ohostedcons.StorageEnv.DOMAIN_TYPE],
                **activate_devices)
            with ohostedutil.VirtUserContext(
                    self.environment,
                    # umask 007
                    umask=stat.S_IRWXO):
                backend.connect()

        # Run prepareImage on each (image, volume) pair to populate
        # the run-time links under /var/run/vdsm/storage
        for img_uuid, vol_uuid in [
            (
                self.environment[ohostedcons.StorageEnv.IMG_UUID],
                self.environment[ohostedcons.StorageEnv.VOL_UUID],
            ),
            (
                self.environment[ohostedcons.StorageEnv.METADATA_IMAGE_UUID],
                self.environment[ohostedcons.StorageEnv.METADATA_VOLUME_UUID],
            ),
            (
                self.environment[ohostedcons.StorageEnv.LOCKSPACE_IMAGE_UUID],
                self.environment[ohostedcons.StorageEnv.LOCKSPACE_VOLUME_UUID],
            ),
            (
                self.environment[ohostedcons.StorageEnv.CONF_IMG_UUID],
                self.environment[ohostedcons.StorageEnv.CONF_VOL_UUID],
            ),
        ]:
            self.cli.prepareImage(
                self.environment[ohostedcons.StorageEnv.SP_UUID],
                self.environment[ohostedcons.StorageEnv.SD_UUID],
                img_uuid,
                vol_uuid,
            )

        all_host_stats = {}
        with ohostedutil.VirtUserContext(
                environment=self.environment,
                umask=stat.S_IWGRP | stat.S_IWOTH,
        ):
            ha_cli = client.HAClient()
            all_host_stats = ha_cli.get_all_host_stats_direct(
                dom_type=self.environment[ohostedcons.StorageEnv.DOMAIN_TYPE],
                sd_uuid=self.environment[ohostedcons.StorageEnv.SD_UUID],
                service_type=self.environment[
                    ohostedcons.SanlockEnv.LOCKSPACE_NAME] + ".metadata",
            )
        if (self.environment[ohostedcons.StorageEnv.HOST_ID]
                in all_host_stats.keys() and not self._re_deploying_host()):
            raise RuntimeError(_('Invalid value for Host ID: already used'))
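One detail worth noting in this example: the prepareImage loop ignores the call's return value, while getStorageDomainStats gets an explicit status check. vdscli verbs report failures through the same status structure, so the loop body could be hardened in the same way; a sketch reusing the check pattern already present above:

            result = self.cli.prepareImage(
                self.environment[ohostedcons.StorageEnv.SP_UUID],
                self.environment[ohostedcons.StorageEnv.SD_UUID],
                img_uuid,
                vol_uuid,
            )
            # Same status convention as getStorageDomainStats above.
            if result['status']['code'] != 0:
                raise RuntimeError(
                    'Unable to prepare image: {message}'.format(
                        message=result['status']['message'],
                    )
                )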