Code example #1
    def enable_systemd_units(self, osd_id, osd_fsid):
        """
        * disables the ceph-disk systemd units to prevent them from running when
          a UDEV event matches Ceph rules
        * creates the ``simple`` systemd units to handle the activation and
          startup of the OSD with ``osd_id`` and ``osd_fsid``
        * enables the OSD systemd unit and finally starts the OSD.
        """
        if not self.from_trigger and not self.skip_systemd:
            # means it was scanned and now activated directly, so ensure that
            # ceph-disk units are disabled, and that the `simple` systemd unit
            # is created and enabled

            # enable the ceph-volume unit for this OSD
            systemctl.enable_volume(osd_id, osd_fsid, 'simple')

            # disable any/all ceph-disk units
            systemctl.mask_ceph_disk()
            terminal.warning(
                ('All ceph-disk systemd units have been disabled to '
                 'prevent OSDs getting triggered by UDEV events'))
        else:
            terminal.info('Skipping enabling of `simple` systemd unit')
            terminal.info('Skipping masking of ceph-disk systemd units')

        if not self.skip_systemd:
            # enable the OSD
            systemctl.enable_osd(osd_id)

            # start the OSD
            systemctl.start_osd(osd_id)
        else:
            terminal.info(
                'Skipping enabling and starting OSD simple systemd unit because --no-systemd was used'
            )
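
Usage note: this method is normally reached through ceph-volume's `simple activate` subcommand after a `simple scan`. The following is a minimal sketch of calling it directly; the `Activate` constructor signature, the `skip_systemd` attribute (ordinarily set from the `--no-systemd` flag), and the OSD id/fsid values are assumptions and placeholders, not part of the listings above.

    # Minimal sketch under assumptions noted above
    from ceph_volume.devices.simple.activate import Activate

    activate = Activate([], from_trigger=False)    # assumed constructor: argv list plus from_trigger flag
    activate.skip_systemd = False                  # assumed attribute, normally set by --no-systemd
    activate.enable_systemd_units('0', 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee')  # placeholder osd_id / osd_fsid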
Code example #2
File: activate.py Project: LenzGr/ceph
    def enable_systemd_units(self, osd_id, osd_fsid):
        """
        * disables the ceph-disk systemd units to prevent them from running when
          a UDEV event matches Ceph rules
        * creates the ``simple`` systemd units to handle the activation and
          startup of the OSD with ``osd_id`` and ``osd_fsid``
        * enables the OSD systemd unit and finally starts the OSD.
        """
        if not self.from_trigger and not self.skip_systemd:
            # means it was scanned and now activated directly, so ensure that
            # ceph-disk units are disabled, and that the `simple` systemd unit
            # is created and enabled

            # enable the ceph-volume unit for this OSD
            systemctl.enable_volume(osd_id, osd_fsid, 'simple')

            # disable any/all ceph-disk units
            systemctl.mask_ceph_disk()
            terminal.warning(
                ('All ceph-disk systemd units have been disabled to '
                 'prevent OSDs getting triggered by UDEV events')
            )
        else:
            terminal.info('Skipping enabling of `simple` systemd unit')
            terminal.info('Skipping masking of ceph-disk systemd units')

        if not self.skip_systemd:
            # enable the OSD
            systemctl.enable_osd(osd_id)

            # start the OSD
            systemctl.start_osd(osd_id)
        else:
            terminal.info(
                'Skipping enabling and starting OSD simple systemd unit because --no-systemd was used'
            )
Code example #3
File: activate.py Project: Abhishekvrshny/ceph
    def activate(self, args):
        with open(args.json_config, 'r') as fp:
            osd_metadata = json.load(fp)

        # Make sure that required devices are configured
        self.validate_devices(osd_metadata)

        osd_id = osd_metadata.get('whoami', args.osd_id)
        osd_fsid = osd_metadata.get('fsid', args.osd_fsid)
        data_uuid = osd_metadata.get('data', {}).get('uuid')
        conf.cluster = osd_metadata.get('cluster_name', 'ceph')
        if not data_uuid:
            raise RuntimeError(
                'Unable to activate OSD %s - no "uuid" key found for data' % args.osd_id
            )

        # Encryption detection, and capturing of the keys to decrypt
        self.is_encrypted = osd_metadata.get('encrypted', False)
        self.encryption_type = osd_metadata.get('encryption_type')
        if self.is_encrypted:
            lockbox_secret = osd_metadata.get('lockbox.keyring')
            # write the keyring always so that we can unlock
            encryption_utils.write_lockbox_keyring(osd_id, osd_fsid, lockbox_secret)
            # Store the secret around so that the decrypt method can reuse
            raw_dmcrypt_secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid)
            # Note how both these calls need b64decode. For some reason, the
            # way ceph-disk creates these keys, it stores them in the monitor
            # *undecoded*, requiring this decode call again. The lvm side of
            # encryption doesn't need it, so we are assuming here that anything
            # that `simple` scans, will come from ceph-disk and will need this
            # extra decode call here
            self.dmcrypt_secret = base64.b64decode(raw_dmcrypt_secret)

        cluster_name = osd_metadata.get('cluster_name', 'ceph')
        osd_dir = '/var/lib/ceph/osd/%s-%s' % (cluster_name, osd_id)

        # XXX there is no support for LVM here
        data_device = self.get_device(data_uuid)
        journal_device = self.get_device(osd_metadata.get('journal', {}).get('uuid'))
        block_device = self.get_device(osd_metadata.get('block', {}).get('uuid'))
        block_db_device = self.get_device(osd_metadata.get('block.db', {}).get('uuid'))
        block_wal_device = self.get_device(osd_metadata.get('block.wal', {}).get('uuid'))

        if not system.device_is_mounted(data_device, destination=osd_dir):
            process.run(['mount', '-v', data_device, osd_dir])

        device_map = {
            'journal': journal_device,
            'block': block_device,
            'block.db': block_db_device,
            'block.wal': block_wal_device
        }

        for name, device in device_map.items():
            if not device:
                continue
            # always re-do the symlink regardless if it exists, so that the journal
            # device path that may have changed can be mapped correctly every time
            destination = os.path.join(osd_dir, name)
            process.run(['ln', '-snf', device, destination])

            # make sure that the journal has proper permissions
            system.chown(device)

        if not self.systemd:
            # enable the ceph-volume unit for this OSD
            systemctl.enable_volume(osd_id, osd_fsid, 'simple')

            # disable any/all ceph-disk units
            systemctl.mask_ceph_disk()

        # enable the OSD
        systemctl.enable_osd(osd_id)

        # start the OSD
        systemctl.start_osd(osd_id)

        terminal.success('Successfully activated OSD %s with FSID %s' % (osd_id, osd_fsid))
        terminal.warning(
            ('All ceph-disk systemd units have been disabled to '
             'prevent OSDs getting triggered by UDEV events')
        )
Code example #4
    def activate(self, args):
        with open(args.json_config, 'r') as fp:
            osd_metadata = json.load(fp)

        osd_id = osd_metadata.get('whoami', args.osd_id)
        osd_fsid = osd_metadata.get('fsid', args.osd_fsid)

        cluster_name = osd_metadata.get('cluster_name', 'ceph')
        osd_dir = '/var/lib/ceph/osd/%s-%s' % (cluster_name, osd_id)
        data_uuid = osd_metadata.get('data', {}).get('uuid')
        if not data_uuid:
            raise RuntimeError(
                'Unable to activate OSD %s - no "uuid" key found for data' % args.osd_id
            )
        data_device = disk.get_device_from_partuuid(data_uuid)
        journal_device = disk.get_device_from_partuuid(osd_metadata.get('journal', {}).get('uuid'))
        block_device = disk.get_device_from_partuuid(osd_metadata.get('block', {}).get('uuid'))
        block_db_device = disk.get_device_from_partuuid(osd_metadata.get('block.db', {}).get('uuid'))
        block_wal_device = disk.get_device_from_partuuid(
            osd_metadata.get('block.wal', {}).get('uuid')
        )

        if not system.device_is_mounted(data_device, destination=osd_dir):
            process.run(['mount', '-v', data_device, osd_dir])

        device_map = {
            'journal': journal_device,
            'block': block_device,
            'block.db': block_db_device,
            'block.wal': block_wal_device
        }

        for name, device in device_map.items():
            if not device:
                continue
            # always re-do the symlink regardless if it exists, so that the journal
            # device path that may have changed can be mapped correctly every time
            destination = os.path.join(osd_dir, name)
            process.run(['ln', '-snf', device, destination])

            # make sure that the journal has proper permissions
            system.chown(device)

        if not self.systemd:
            # enable the ceph-volume unit for this OSD
            systemctl.enable_volume(osd_id, osd_fsid, 'simple')

            # disable any/all ceph-disk units
            systemctl.mask_ceph_disk()

        # enable the OSD
        systemctl.enable_osd(osd_id)

        # start the OSD
        systemctl.start_osd(osd_id)

        terminal.success('Successfully activated OSD %s with FSID %s' % (osd_id, osd_fsid))
        terminal.warning(
            ('All ceph-disk systemd units have been disabled to '
             'prevent OSDs getting triggered by UDEV events')
        )
Code example #5
File: activate.py Project: xiaoxichen/ceph
    def activate(self, args):
        with open(args.json_config, 'r') as fp:
            osd_metadata = json.load(fp)

        osd_id = osd_metadata.get('whoami', args.osd_id)
        osd_fsid = osd_metadata.get('fsid', args.osd_fsid)

        cluster_name = osd_metadata.get('cluster_name', 'ceph')
        osd_dir = '/var/lib/ceph/osd/%s-%s' % (cluster_name, osd_id)
        data_uuid = osd_metadata.get('data', {}).get('uuid')
        if not data_uuid:
            raise RuntimeError(
                'Unable to activate OSD %s - no "uuid" key found for data' % args.osd_id
            )
        data_device = disk.get_device_from_partuuid(data_uuid)
        journal_device = disk.get_device_from_partuuid(osd_metadata.get('journal', {}).get('uuid'))
        block_device = disk.get_device_from_partuuid(osd_metadata.get('block', {}).get('uuid'))
        block_db_device = disk.get_device_from_partuuid(osd_metadata.get('block.db', {}).get('uuid'))
        block_wal_device = disk.get_device_from_partuuid(
            osd_metadata.get('block.wal', {}).get('uuid')
        )

        if not system.device_is_mounted(data_device, destination=osd_dir):
            process.run(['sudo', 'mount', '-v', data_device, osd_dir])

        device_map = {
            'journal': journal_device,
            'block': block_device,
            'block.db': block_db_device,
            'block.wal': block_wal_device
        }

        for name, device in device_map.items():
            if not device:
                continue
            # always re-do the symlink regardless if it exists, so that the journal
            # device path that may have changed can be mapped correctly every time
            destination = os.path.join(osd_dir, name)
            process.run(['sudo', 'ln', '-snf', device, destination])

            # make sure that the journal has proper permissions
            system.chown(device)

        if not self.systemd:
            # enable the ceph-volume unit for this OSD
            systemctl.enable_volume(osd_id, osd_fsid, 'simple')

            # disable any/all ceph-disk units
            systemctl.mask_ceph_disk()

        # enable the OSD
        systemctl.enable_osd(osd_id)

        # start the OSD
        systemctl.start_osd(osd_id)

        if not self.systemd:
            terminal.success('Successfully activated OSD %s with FSID %s' % (osd_id, osd_fsid))
            terminal.warning(
                ('All ceph-disk systemd units have been disabled to '
                 'prevent OSDs getting triggered by UDEV events')
            )
Code example #6
    def activate(self, args):
        with open(args.json_config, 'r') as fp:
            osd_metadata = json.load(fp)

        # Make sure that required devices are configured
        self.validate_devices(osd_metadata)

        osd_id = osd_metadata.get('whoami', args.osd_id)
        osd_fsid = osd_metadata.get('fsid', args.osd_fsid)
        data_uuid = osd_metadata.get('data', {}).get('uuid')
        conf.cluster = osd_metadata.get('cluster_name', 'ceph')
        if not data_uuid:
            raise RuntimeError(
                'Unable to activate OSD %s - no "uuid" key found for data' %
                args.osd_id)

        # Encryption detection, and capturing of the keys to decrypt
        self.is_encrypted = osd_metadata.get('encrypted', False)
        self.encryption_type = osd_metadata.get('encryption_type')
        if self.is_encrypted:
            lockbox_secret = osd_metadata.get('lockbox.keyring')
            # write the keyring always so that we can unlock
            encryption_utils.write_lockbox_keyring(osd_id, osd_fsid,
                                                   lockbox_secret)
            # Store the secret around so that the decrypt method can reuse
            raw_dmcrypt_secret = encryption_utils.get_dmcrypt_key(
                osd_id, osd_fsid)
            # Note how both these calls need b64decode. For some reason, the
            # way ceph-disk creates these keys, it stores them in the monitor
            # *undecoded*, requiring this decode call again. The lvm side of
            # encryption doesn't need it, so we are assuming here that anything
            # that `simple` scans, will come from ceph-disk and will need this
            # extra decode call here
            self.dmcrypt_secret = base64.b64decode(raw_dmcrypt_secret)

        cluster_name = osd_metadata.get('cluster_name', 'ceph')
        osd_dir = '/var/lib/ceph/osd/%s-%s' % (cluster_name, osd_id)

        # XXX there is no support for LVM here
        data_device = self.get_device(data_uuid)
        journal_device = self.get_device(
            osd_metadata.get('journal', {}).get('uuid'))
        block_device = self.get_device(
            osd_metadata.get('block', {}).get('uuid'))
        block_db_device = self.get_device(
            osd_metadata.get('block.db', {}).get('uuid'))
        block_wal_device = self.get_device(
            osd_metadata.get('block.wal', {}).get('uuid'))

        if not system.device_is_mounted(data_device, destination=osd_dir):
            process.run(['mount', '-v', data_device, osd_dir])

        device_map = {
            'journal': journal_device,
            'block': block_device,
            'block.db': block_db_device,
            'block.wal': block_wal_device
        }

        for name, device in device_map.items():
            if not device:
                continue
            # always re-do the symlink regardless if it exists, so that the journal
            # device path that may have changed can be mapped correctly every time
            destination = os.path.join(osd_dir, name)
            process.run(['ln', '-snf', device, destination])

            # make sure that the journal has proper permissions
            system.chown(device)

        if not self.systemd:
            # enable the ceph-volume unit for this OSD
            systemctl.enable_volume(osd_id, osd_fsid, 'simple')

            # disable any/all ceph-disk units
            systemctl.mask_ceph_disk()

        # enable the OSD
        systemctl.enable_osd(osd_id)

        # start the OSD
        systemctl.start_osd(osd_id)

        terminal.success('Successfully activated OSD %s with FSID %s' %
                         (osd_id, osd_fsid))
        terminal.warning(('All ceph-disk systemd units have been disabled to '
                          'prevent OSDs getting triggered by UDEV events'))