Example #1
    def ghost_nfs_share(self, ghost_share):
        """Bind mount the local units nfs share to another sites location

        :param ghost_share: NFS share URL to ghost
        :type ghost_share: str
        """
        nfs_share_path = os.path.join(
            TV_MOUNTS, self._encode_endpoint(hookenv.config("nfs-shares"))
        )
        ghost_share_path = os.path.join(
            TV_MOUNTS, self._encode_endpoint(ghost_share)
        )

        current_mounts = [mount[0] for mount in host.mounts()]

        if nfs_share_path not in current_mounts:
            # Trilio has not mounted the NFS share, so raise
            raise NFSShareNotMountedException(
                "nfs-shares ({}) not mounted".format(
                    hookenv.config("nfs-shares")
                )
            )

        if ghost_share_path in current_mounts:
            # bind mount already set up, so raise
            raise GhostShareAlreadyMountedException(
                "ghost mountpoint ({}) already bound".format(ghost_share_path)
            )

        if not os.path.exists(ghost_share_path):
            os.mkdir(ghost_share_path)

        host.mount(nfs_share_path, ghost_share_path, options="bind")
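A hypothetical invocation of the helper above; the instance name and NFS export are made-up values, not taken from the charm:

# Illustrative only: `trilio_host` and the export URL below are hypothetical.
try:
    trilio_host.ghost_nfs_share("10.20.0.5:/srv/triliovault")
except NFSShareNotMountedException:
    # the configured nfs-shares endpoint is not mounted yet
    pass
except GhostShareAlreadyMountedException:
    # the bind mount is already in place, nothing to do
    pass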
Example #2
    def write_fstab(self):
        for (mnt, dev) in host.mounts():
            if self.sftp_dir in mnt:
                host.umount(mnt, persist=True)
        for entry in self.parse_config():
            host.mount(
                entry["src"],
                "{}/{}/{}".format(self.sftp_dir, entry["user"], entry["name"]),
                "bind,_netdev,x-systemd.requires={}".format(self.sftp_dir),
                persist=True,
                filesystem="none",
            )
            if self.charm_config["sftp-chown-mnt"]:
                try:
                    shutil.chown(
                        "{}/{}/{}".format(self.sftp_dir, entry["user"],
                                          entry["name"]),
                        user=entry["user"],
                        group=entry["user"],
                    )
                except Exception as e:
                    hookenv.log("Chown failed: {}".format(e),
                                level=hookenv.WARNING)
            else:
                try:
                    shutil.chown(
                        "{}/{}/{}".format(self.sftp_dir, entry["user"],
                                          entry["name"]),
                        user="******",
                        group="sftp",
                    )
                except Exception as e:
                    hookenv.log("Chown failed: {}".format(e),
                                level=hookenv.WARNING)
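The loop above relies on only three keys per entry; a hypothetical record returned by parse_config() could look like this (values are illustrative, the real schema belongs to the charm):

# Hypothetical parse_config() entry; only the keys used above are shown.
entry = {
    "src": "/srv/exports/projects",  # path to bind mount
    "user": "alice",                 # sftp account owning the mount
    "name": "projects",              # mount name under the user's directory
}
# which would be bind mounted at "{sftp_dir}/alice/projects"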
Example #3
    def test_lists_the_mount_points(self):
        with patch_open() as (mock_open, mock_file):
            mock_file.readlines.return_value = MOUNT_LINES
            result = host.mounts()

            self.assertEqual(
                result, [['/', 'rootfs'], ['/sys', 'sysfs'], ['/proc', 'proc'],
                         ['/dev', 'udev'], ['/dev/pts', 'devpts']])
            mock_open.assert_called_with('/proc/mounts')
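The assertions above imply that host.mounts() reads /proc/mounts and returns [mountpoint, device] pairs. A minimal sketch of such a helper, inferred from the test rather than copied from charmhelpers:

# Sketch only: infers the behaviour asserted in the test above.
def mounts_sketch():
    """Return a list of [mountpoint, device] pairs from /proc/mounts."""
    result = []
    with open('/proc/mounts') as fh:
        for line in fh.readlines():
            device, mountpoint = line.split()[:2]
            result.append([mountpoint, device])
    return result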
Example #4
def reconfig_charm():
    bkp_type = config('backup-target-type')
    retry_count = 0

    # Stop the service
    service_stop('tvault-contego')
    if bkp_type == 's3':
        service_stop('tvault-object-store')

    # Get the mount points and un-mount tvault's mount points.
    mount_points = mounts()
    sorted_list = [mp[0] for mp in mount_points
                   if config('tv-data-dir') in mp[0]]
    # stopping the tvault-object-store service may take time
    while service_running('tvault-object-store') and retry_count < 3:
        log('Waiting for tvault-object-store service to stop')
        retry_count += 1
        time.sleep(5)

    for sl in sorted_list:
        umount(sl)

    # Validate backup target
    if not validate_backup():
        log("Failed while validating backup")
        status_set(
            'blocked',
            'Invalid Backup target info, please provide valid info')
        return

    if not create_conf():
        log("Failed while creating conf files")
        status_set('blocked', 'Failed while creating conf files')
        return

    # Re-start the object-store service
    if bkp_type == 's3':
        service_restart('tvault-object-store')

    # Re-start the datamover service
    service_restart('tvault-contego')

    # Reconfig successful
    status_set('active', 'Ready...')
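The unmount step above (filter mount points containing the tv-data-dir path, then umount each) recurs in the uninstall path below; a small illustrative helper capturing that pattern, with names that are assumptions rather than part of the charm:

# Illustrative only; not part of the charm.
def umount_matching(path_fragment):
    """Unmount every mount point whose path contains path_fragment."""
    for mountpoint, _device in mounts():
        if path_fragment in mountpoint:
            umount(mountpoint)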
Example #5
def uninstall_plugin():
    """
    Uninstall TrilioVault DataMover packages
    """
    retry_count = 0
    bkp_type = config('backup-target-type')
    try:
        path = config('tvault-datamover-virtenv')
        service_stop('tvault-contego')
        os.system('sudo systemctl disable tvault-contego')
        os.system('rm -rf /etc/systemd/system/tvault-contego.service')
        if bkp_type == 's3':
            service_stop('tvault-object-store')
            os.system('systemctl disable tvault-object-store')
            os.system('rm -rf /etc/systemd/system/tvault-object-store.service')
        os.system('sudo systemctl daemon-reload')
        os.system('rm -rf {}'.format(path))
        os.system('rm -rf /etc/logrotate.d/tvault-contego')
        os.system('rm -rf {}'.format(config('tv-datamover-conf')))
        os.system('rm -rf /var/log/nova/tvault-contego.log')
        # Get the mount points and un-mount tvault's mount points.
        mount_points = mounts()
        sorted_list = [
            mp[0] for mp in mount_points if config('tv-data-dir') in mp[0]
        ]
        # stopping the tvault-object-store service may take time
        while service_running('tvault-object-store') and retry_count < 3:
            log('Waiting for tvault-object-store service to stop')
            retry_count += 1
            time.sleep(5)

        for sl in sorted_list:
            umount(sl)
        # Uninstall tvault-contego package
        apt_purge(['tvault-contego'])

        log("TrilioVault Datamover package uninstalled successfully")
        return True
    except Exception as e:
        # package uninstallation failed
        log("TrilioVault Datamover package un-installation failed:"
            " {}".format(e))
        return False
Example #6
def clean_storage(block_device):
    '''Ensures a block device is clean.  That is:
        - unmounted
        - any lvm volume groups are deactivated
        - any lvm physical device signatures removed
        - partition table wiped

    :param block_device: str: Full path to block device to clean.
    '''
    for mp, d in mounts():
        if d == block_device:
            juju_log('clean_storage(): Found %s mounted @ %s, unmounting.' %
                     (d, mp))
            umount(mp, persist=True)

    if is_lvm_physical_volume(block_device):
        deactivate_lvm_volume_group(block_device)
        remove_lvm_physical_volume(block_device)

    zap_disk(block_device)
Example #7
def clean_storage(block_device):
    '''Ensures a block device is clean.  That is:
        - unmounted
        - any lvm volume groups are deactivated
        - any lvm physical device signatures removed
        - partition table wiped

    :param block_device: str: Full path to block device to clean.
    '''
    for mp, d in mounts():
        if d == block_device:
            log('clean_storage(): Found %s mounted @ %s, unmounting.' %
                (d, mp))
            umount(mp, persist=True)

    if is_lvm_physical_volume(block_device):
        deactivate_lvm_volume_group(block_device)
        remove_lvm_physical_volume(block_device)

    zap_disk(block_device)
Example #8
def uninstall_plugin():
    """
    Uninstall TrilioVault DataMover packages
    """
    retry_count = 0
    try:
        path = TVAULT_VIRTENV_PATH
        service_stop('tvault-contego')
        subprocess.check_call(
            ['sudo', 'systemctl', 'disable', 'tvault-contego'])
        os.remove('/etc/systemd/system/tvault-contego.service')
        subprocess.check_call(['sudo', 'systemctl', 'daemon-reload'])
        shutil.rmtree(path)
        os.remove('/etc/logrotate.d/tvault-contego')
        os.remove(DATAMOVER_CONF)
        os.remove('/var/log/nova/tvault-contego.log')
        # Get the mount points and un-mount tvault's mount points.
        mount_points = mounts()
        sorted_list = [mp[0] for mp in mount_points
                       if TV_DATA_DIR in mp[0]]
        # stopping the tvault-object-store service may take time
        while service_running('tvault-object-store') and retry_count < 3:
            log('Waiting for tvault-object-store service to stop')
            retry_count += 1
            time.sleep(5)

        for sl in sorted_list:
            umount(sl)
        # Uninstall tvault-contego package
        apt_purge(['tvault-contego'])

        log("TrilioVault Datamover package uninstalled successfully")
        return True
    except Exception as e:
        # package uninstallation failed
        log("TrilioVault Datamover package un-installation failed:"
            " {}".format(e))
        return False
Example #9
def managed_mounts():
    '''List of all mounted managed volumes'''
    return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
Example #10
def filesystem_mounted(fs):
    '''Determine whether a filesystem is already mounted.'''
    return fs in [f for f, m in mounts()]
Example #11
def filesystem_mounted(fs):
    """Determine whether a filesytems is already mounted."""
    return fs in [f for f, m in mounts()]
Example #12
def filesystem_mounted(fs):
    return fs in [f for f, m in mounts()]
Example #13
def nfs_is_mounted(mountpoint):
    mounts = host.mounts()
    for local, remote in mounts:
        if remote == mountpoint:
            return True
    return False
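Given the [mountpoint, device] ordering shown in Example #3, the `remote` variable above is the device field, so the function matches an NFS export rather than a local path. A hypothetical usage, with a made-up export:

# Hypothetical usage; the export and mount point below are illustrative.
if not nfs_is_mounted("backupserver:/exports/trilio"):
    host.mount("backupserver:/exports/trilio", "/mnt/trilio",
               filesystem="nfs")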
Example #14
def replace_osd(dead_osd_number,
                dead_osd_device,
                new_osd_device,
                osd_format,
                osd_journal,
                reformat_osd=False,
                ignore_errors=False):
    """
    This function will automate the replacement of a failed osd disk as much
    as possible. It will revoke the keys for the old osd, remove it from the
    crush map and then add a new osd into the cluster.
    :param dead_osd_number: The osd number found in ceph osd tree. Example: 99
    :param dead_osd_device: The physical device.  Example: /dev/sda
    :param osd_format:
    :param osd_journal:
    :param reformat_osd:
    :param ignore_errors:
    """
    host_mounts = mounts()
    mount_point = None
    for mount in host_mounts:
        if mount[1] == dead_osd_device:
            mount_point = mount[0]
    # need to convert dev to osd number
    # also need to get the mounted drive so we can tell the admin to
    # replace it
    try:
        # Drop this osd out of the cluster. This will begin a
        # rebalance operation
        status_set('maintenance', 'Removing osd {}'.format(dead_osd_number))
        subprocess.check_output([
            'ceph',
            '--id',
            'osd-upgrade',
            'osd', 'out',
            'osd.{}'.format(dead_osd_number)])

        # Kill the osd process if it's not already dead
        if systemd():
            service_stop('ceph-osd@{}'.format(dead_osd_number))
        else:
            subprocess.check_output(['stop', 'ceph-osd', 'id={}'.format(
                dead_osd_number)])
        # umount if still mounted
        ret = umount(mount_point)
        if ret < 0:
            raise RuntimeError('umount {} failed with error: {}'.format(
                mount_point, os.strerror(ret)))
        # Clean up the old mount point
        shutil.rmtree(mount_point)
        subprocess.check_output([
            'ceph',
            '--id',
            'osd-upgrade',
            'osd', 'crush', 'remove',
            'osd.{}'.format(dead_osd_number)])
        # Revoke the OSDs access keys
        subprocess.check_output([
            'ceph',
            '--id',
            'osd-upgrade',
            'auth', 'del',
            'osd.{}'.format(dead_osd_number)])
        subprocess.check_output([
            'ceph',
            '--id',
            'osd-upgrade',
            'osd', 'rm',
            'osd.{}'.format(dead_osd_number)])
        status_set('maintenance', 'Setting up replacement osd {}'.format(
            new_osd_device))
        osdize(new_osd_device,
               osd_format,
               osd_journal,
               reformat_osd,
               ignore_errors)
    except subprocess.CalledProcessError as e:
        log('replace_osd failed with error: ' + e.output)
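The device-to-mountpoint lookup at the top of replace_osd can be written more compactly; a sketch of that alternative, offered only as a design note:

# Sketch only: the same lookup as the loop above, using next() with a default.
def mountpoint_for(device):
    return next((mp for mp, dev in mounts() if dev == device), None)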
Example #15
def filesystem_mounted(fs):
    return fs in [f for f, m in mounts()]
Example #16
def mounts():
    "List mounts"
    return host.mounts()
Example #17
def managed_mounts():
    '''List of all mounted managed volumes'''
    return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
Example #18
def mounts():
    "List mounts"
    return host.mounts()
Example #19
def managed_mounts():
    '''List of all mounted managed volumes'''
    return [mount for mount in host.mounts() if mount[0].startswith(MOUNT_BASE)]
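Unlike Examples #9 and #17, which return a lazy filter object, this version builds the list eagerly, which is usually what callers of a function documented as returning a list expect under Python 3. A quick equivalence check, assuming MOUNT_BASE is a path prefix (the value below is illustrative):

# MOUNT_BASE value is an assumption used only for this illustration.
MOUNT_BASE = '/srv/data'
lazy = filter(lambda m: m[0].startswith(MOUNT_BASE), host.mounts())
eager = [m for m in host.mounts() if m[0].startswith(MOUNT_BASE)]
assert list(lazy) == eager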