Example #1
def determine_block_devices():
    block_device = config('block-device')
    if not block_device or block_device.lower() == 'none':
        log("No storage devices specified in 'block_device' config",
            level=ERROR)
        return None

    if block_device == 'guess':
        bdevs = guess_block_devices()
    else:
        bdevs = block_device.split(' ')

    # List storage instances for the 'block-devices'
    # store declared for this charm too, and add
    # their block device paths to the list.
    storage_ids = storage_list('block-devices')
    bdevs.extend((storage_get('location', s) for s in storage_ids))

    # only sorted so the tests pass; doesn't affect functionality
    bdevs = sorted(set(bdevs))
    # attempt to ensure block devices, but filter out missing devs
    _none = ['None', 'none']
    valid_bdevs = [x for x in map(ensure_block_device, bdevs)
                   if str(x).lower() not in _none]
    log('Valid ensured block devices: %s' % valid_bdevs)
    return valid_bdevs
Example #2
def get_devices():
    devices = []
    if config('osd-devices'):
        for path in config('osd-devices').split(' '):
            path = path.strip()
            # Ensure that only block devices
            # are considered for evaluation as block devices.
            # This avoids issues with relative directories
            # being passed via configuration, and ensures that
            # the path to a block device provided by the user
            # is used, rather than its target which may change
            # between reboots in the case of bcache devices.
            if is_block_device(path):
                devices.append(path)
            # Make sure it's a device which is specified using an
            # absolute path so that the current working directory
            # or any relative path under this directory is not used
            elif os.path.isabs(path):
                devices.append(os.path.realpath(path))

    # List storage instances for the 'osd-devices'
    # store declared for this charm too, and add
    # their block device paths to the list.
    storage_ids = storage_list('osd-devices')
    devices.extend((storage_get('location', s) for s in storage_ids))

    # Filter out any devices in the action managed unit-local device blacklist
    _blacklist = get_blacklist()
    return [device for device in devices if device not in _blacklist]
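A quick way to sanity-check what this variant returns is to stub out the helpers it calls. The test below is a hypothetical sketch: it assumes get_devices() lives in a module named hooks (the module name is an assumption) and that config, is_block_device, storage_list, storage_get and get_blacklist are module-level names that can be patched.

from unittest import mock

import hooks  # hypothetical module containing get_devices()


@mock.patch.object(hooks, 'get_blacklist')
@mock.patch.object(hooks, 'storage_get')
@mock.patch.object(hooks, 'storage_list')
@mock.patch.object(hooks, 'is_block_device')
@mock.patch.object(hooks, 'config')
def test_get_devices(config, is_block_device, storage_list,
                     storage_get, get_blacklist):
    # Two devices from config, one from Juju storage, one blacklisted.
    config.return_value = '/dev/vdb /dev/vdc'
    is_block_device.return_value = True
    storage_list.return_value = ['osd-devices/0']
    storage_get.return_value = '/dev/vdd'
    get_blacklist.return_value = ['/dev/vdc']
    assert hooks.get_devices() == ['/dev/vdb', '/dev/vdd']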
Example #3
def _configure_block_devices():
    """Configure block devices, either from Juju storage or as a local block
    device configured in the config.
    """
    if service_enabled('volume'):
        block_devices = []
        # first see if a specified block device is configured
        conf = config()
        if conf['block-device'] not in [None, 'None', 'none']:
            block_devices.extend(conf['block-device'].split())
        # now see if there are any Juju storage devices configured
        storage_ids = storage_list('block-devices')
        storage_devs = [storage_get('location', s) for s in storage_ids]
        # add them into the block_devices:
        block_devices.extend(storage_devs)
        if block_devices:
            status_set('maintenance', 'Checking configuration of lvm storage')
        # Note that there may be None now, and remove-missing is set to true,
        # so we still have to run the function regardless of whether
        # block_devices is an empty list or not.
        configure_lvm_storage(block_devices,
                              conf['volume-group'],
                              conf['overwrite'] in ['true', 'True', True],
                              conf['remove-missing'],
                              conf['remove-missing-force'])
Example #4
def get_devices():
    '''Get a list of storage devices.'''
    devices = []
    storage_ids = storage_list()
    for sid in storage_ids:
        storage = storage_get('location', sid)
        devices.append(storage)
    return devices
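All of these examples lean on the same two helpers, storage_list() and storage_get() (imported directly or via hookenv). Roughly, they are thin wrappers around the Juju hook tools storage-list and storage-get; the sketch below is an approximation for orientation, not the verbatim charmhelpers source.

import json
import subprocess


def storage_list(storage_name=None):
    """Return the storage instance IDs attached to this unit
    (e.g. ['osd-devices/0']), optionally filtered by storage name."""
    cmd = ['storage-list', '--format=json']
    if storage_name:
        cmd.append(storage_name)
    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))


def storage_get(attribute=None, storage_id=None):
    """Return a storage attribute such as 'location'; with no attribute,
    return the full attribute dict (as used by storage_get()['location'])."""
    cmd = ['storage-get', '--format=json']
    if storage_id:
        cmd.extend(['-s', storage_id])
    if attribute:
        cmd.append(attribute)
    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))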
Example #5
def node_dist_dir():
    """ Absolute path of Node.js application dir

    Returns:
    Absolute string of node application directory
    """
    storage_id = storage_list('app')[0]
    return storage_get('location', storage_id)
Example #6
def elixir_dist_dir():
    """ Absolute path of Elixir application dir

    Returns:
    Absolute string of elixir application directory
    """
    storage_id = storage_list('app')[0]
    return storage_get('location', storage_id)
Example #7
def get_journal_devices():
    if config('osd-journal'):
        devices = [l.strip() for l in config('osd-journal').split(' ')]
    else:
        devices = []
    storage_ids = storage_list('osd-journals')
    devices.extend((storage_get('location', s) for s in storage_ids))
    devices = filter(os.path.exists, devices)

    return set(devices)
Example #8
def get_journal_devices():
    if config('osd-journal'):
        devices = [l.strip() for l in config('osd-journal').split(' ')]
    else:
        devices = []
    storage_ids = storage_list('osd-journals')
    devices.extend((storage_get('location', s) for s in storage_ids))

    # Filter out any devices in the action managed unit-local device blacklist
    _blacklist = get_blacklist()
    return set(device for device in devices
               if device not in _blacklist and os.path.exists(device))
Example #9
def get_devices():
    if config('osd-devices'):
        devices = [
            os.path.realpath(path)
            for path in config('osd-devices').split(' ')]
    else:
        devices = []
    # List storage instances for the 'osd-devices'
    # store declared for this charm too, and add
    # their block device paths to the list.
    storage_ids = storage_list('osd-devices')
    devices.extend((storage_get('location', s) for s in storage_ids))
    return devices
Example #10
def get_osd_journal():
    '''
    Returns the block device path to use for the OSD journal, if any.

    If there is an osd-journal storage instance attached, it will be
    used as the journal. Otherwise, the osd-journal configuration will
    be returned.
    '''
    storage_ids = storage_list('osd-journal')
    if storage_ids:
        # There can be at most one osd-journal storage instance.
        return storage_get('location', storage_ids[0])
    return config('osd-journal')
Example #11
def attach():
    # This happens either with a non-existing nextcloud installation
    # -OR-
    # after a nextcloud installation has been performed and the operator
    # has decided to attach storage post installation, in which case the
    # /var/www/nextcloud directory is present.
    storageids = storage_list("data")
    if not storageids:
        status_set("blocked", "Cannot locate attached storage")
        return

    storageid = storageids[0]
    mount = storage_get("location", storageid)
    if not mount:
        hookenv.status_set(
            "blocked",
            "Cannot locate attached storage mount directory for data")
        return

    unitdata.kv().set(data_mount_key, mount)
    log("data storage attached at {}".format(mount))

    # In case storage is attached post deploy, we might have accumulated
    # some data, so we need to make sure the attached storage meets our
    # requirements on available disk space.
    if os.path.exists('/var/www/nextcloud'):
        required_space = shutil.disk_usage('/var/www/nextcloud/data').used
        free_space = shutil.disk_usage(mount).free
        if required_space > free_space:
            hookenv.status_set("blocked", "attached storage too small.")
            return

    apt.queue_install(["rsync"])
    reactive.set_state("nextcloud.storage.data.attached")
Example #12
def determine_block_device():
    """Determine the block device to use for ephemeral storage

    :returns: Block device to use for storage
    :rtype: str or None if not configured"""
    config_dev = config('ephemeral-device')

    if config_dev and os.path.exists(config_dev):
        return config_dev

    storage_ids = storage_list('ephemeral-device')
    storage_devs = [storage_get('location', s) for s in storage_ids]

    if storage_devs:
        return storage_devs[0]

    return None
Example #13
def get_devices():
    devices = []
    if config('osd-devices'):
        for path in config('osd-devices').split(' '):
            path = path.strip()
            # Make sure it's a device which is specified using an
            # absolute path so that the current working directory
            # or any relative path under this directory is not used
            if os.path.isabs(path):
                devices.append(os.path.realpath(path))

    # List storage instances for the 'osd-devices'
    # store declared for this charm too, and add
    # their block device paths to the list.
    storage_ids = storage_list('osd-devices')
    devices.extend((storage_get('location', s) for s in storage_ids))
    return devices
Example #14
def attach():
    mount = storage_get()['location']
    unitdata.kv().set(data_mount_key, mount)
    unitdata.kv().set(data_path_key, os.path.join(mount, 'var/lib/docker'))

    log('Docker registry storage attached: {}'.format(mount))

    if os.path.exists('/var/lib/docker'):
        required_space = shutil.disk_usage('/var/lib/docker').used
        free_space = shutil.disk_usage(mount).free

        if required_space > free_space:
            status_set('blocked', 'Not enough free storage space.')
        return

    apt.queue_install(['rsync'])
    set_state('docker-registry.storage.docker-registry.attached')
Example #15
def storage_attach():
    storageids = hookenv.storage_list('state')
    if not storageids:
        hookenv.status_set('blocked', 'cannot locate attached storage')
        return
    storageid = storageids[0]

    mount = hookenv.storage_get('location', storageid)
    if not mount:
        hookenv.status_set('blocked', 'cannot locate attached storage mount')
        return

    state_dir = os.path.join(mount, "state")
    unitdata.kv().set('ksql.storage.state_dir', state_dir)
    hookenv.log('Ksql storage attached at {}'.format(state_dir))

    remove_state('ksql.configured')
    set_state('ksql.storage.state.attached')
Example #16
def get_juju_bricks() -> Result:
    """
    Get the list of bricks from juju storage.
    :return: Result with Ok or Err
    """
    log("Gathering list of juju storage brick devices")
    # Get juju storage devices
    brick_list = []
    juju_config_brick_devices = storage_list()
    for brick in juju_config_brick_devices:
        if brick is None:
            continue
        s = storage_get("location", brick)
        if s is not None:
            brick_list.append(s.strip())

    log("List of juju storage brick devices: {}".format(brick_list))
    bricks = scan_devices(brick_list)
    if bricks.is_err():
        return Err(bricks.value)
    return Ok(bricks.value)
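The Ok/Err returns above follow a Rust-style Result convention. Purely as an illustration of the shape this code expects (is_err() plus a value attribute), and not the charm's actual Result implementation, a minimal stand-in could look like this:

class Result:
    """Minimal Rust-style result: holds a value and an ok/err flag."""

    def __init__(self, value, ok):
        self.value = value
        self._ok = ok

    def is_ok(self):
        return self._ok

    def is_err(self):
        return not self._ok


def Ok(value):
    return Result(value, ok=True)


def Err(error):
    return Result(error, ok=False)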
Example #17
def attach():
    mount = hookenv.storage_get()['location']
    pgdata = os.path.join(mount, postgresql.version(), 'main')
    unitdata.kv().set(pgdata_mount_key, mount)
    unitdata.kv().set(pgdata_path_key, pgdata)

    hookenv.log('PGDATA storage attached at {}'.format(mount))

    # Never happens with Juju 2.0 as we can't reuse an old mount. This
    # check is here for the future.
    existingdb = os.path.exists(pgdata)

    required_space = shutil.disk_usage(postgresql.data_dir()).used
    free_space = shutil.disk_usage(mount).free

    if required_space > free_space and not existingdb:
        hookenv.status_set('blocked',
                           'Not enough free space in pgdata storage')
    else:
        apt.queue_install(['rsync'])
        coordinator.acquire('restart')
        reactive.set_state('postgresql.storage.pgdata.attached')
Example #18
def storage_attach():
    storageids = hookenv.storage_list('logs')
    if not storageids:
        hookenv.status_set('blocked', 'cannot locate attached storage')
        return
    storageid = storageids[0]

    mount = hookenv.storage_get('location', storageid)
    if not mount:
        hookenv.status_set('blocked', 'cannot locate attached storage mount')
        return

    log_dir = os.path.join(mount, "logs")
    unitdata.kv().set('kafka.storage.log_dir', log_dir)
    hookenv.log('Kafka logs storage attached at {}'.format(log_dir))

    init_brokerid(log_dir)
    set_flag('kafka.storage.logs.attached')
    # Stop Kafka; removing the kafka.started state will trigger
    # a reconfigure if/when it's ready
    remove_state('kafka.configured')
    set_flag('kafka.force-reconfigure')
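init_brokerid() is not shown here. As a rough, hypothetical sketch (not the charm's actual helper), it could persist a broker id alongside the relocated log directory so that the read side shown in the install() example further below can pick it up:

import os
import random


def init_brokerid(log_dir):
    """Write <log_dir>/.broker_id once, if it does not already exist.
    A real charm would more likely derive the id from the unit number."""
    broker_path = os.path.join(log_dir, '.broker_id')
    if os.path.isfile(broker_path):
        return
    os.makedirs(log_dir, mode=0o700, exist_ok=True)
    with open(broker_path, 'w') as f:
        f.write(str(random.randint(0, 2 ** 31 - 1)))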
Example #19
def storage_attach():
    storageids = hookenv.storage_list('logs')
    if not storageids:
        hookenv.status_set('blocked', 'cannot locate attached storage')
        return
    storageid = storageids[0]

    mount = hookenv.storage_get('location', storageid)
    if not mount:
        hookenv.status_set('blocked', 'cannot locate attached storage mount')
        return

    log_dir = os.path.join(mount, "logs")
    unitdata.kv().set('kafka.storage.log_dir', log_dir)
    hookenv.log('Kafka logs storage attached at {}'.format(log_dir))
    # Stop Kafka; removing the kafka.started state will trigger
    # a reconfigure if/when it's ready
    kafka = Kafka()
    kafka.close_ports()
    kafka.stop()
    remove_state('kafka.started')
    hookenv.status_set('waiting', 'reconfiguring to use attached storage')
    set_state('kafka.storage.logs.attached')
Example #20
def storage_attach():
    storageids = hookenv.storage_list('data')
    if not storageids:
        hookenv.status_set('blocked', 'cannot locate attached storage')
        return
    storageid = storageids[0]

    mount = hookenv.storage_get('location', storageid)
    if not mount:
        hookenv.status_set('blocked', 'cannot locate attached storage mount')
        return

    data_dir = os.path.join(mount, "data")
    unitdata.kv().set('zookeeper.storage.data_dir', data_dir)
    hookenv.log('Zookeeper data storage attached at {}'.format(data_dir))
    # Stop Zookeeper; removing zookeeper.configured state will trigger
    # a reconfigure if/when it's ready
    zookeeper = Zookeeper()
    zookeeper.close_ports()
    zookeeper.stop()
    clear_flag('zookeeper.configured')
    hookenv.status_set('waiting', 'reconfiguring to use attached storage')
    set_flag('zookeeper.storage.data.attached')
Example #21
def encrypt_storage(storage_name, mountbase=None):
    """
    Set up encryption for the given Juju storage entry, and optionally create
    and mount XFS filesystems on the encrypted storage entry location(s).

    Note that the storage entry **must** be defined with ``type: block``.

    If ``mountbase`` is not given, the location(s) will not be formatted or
    mounted.  When interacting with or mounting the location(s) manually, the
    name returned by :func:`decrypted_device` called on the storage entry's
    location should be used in place of the raw location.

    If the storage is defined as ``multiple``, the individual locations
    will be mounted at ``{mountbase}/{storage_name}/{num}`` where ``{num}``
    is based on the storage ID.  Otherwise, the storage will be mounted at
    ``{mountbase}/{storage_name}``.
    """
    metadata = hookenv.metadata()
    storage_metadata = metadata['storage'][storage_name]
    if storage_metadata['type'] != 'block':
        raise VaultLockerError('Cannot encrypt non-block storage: {}',
                               storage_name)
    multiple = 'multiple' in storage_metadata
    for storage_id in hookenv.storage_list():
        if not storage_id.startswith(storage_name + '/'):
            continue
        storage_location = hookenv.storage_get('location', storage_id)
        if mountbase and multiple:
            mountpoint = Path(mountbase) / storage_id
        elif mountbase:
            mountpoint = Path(mountbase) / storage_name
        else:
            mountpoint = None
        encrypt_device(storage_location, mountpoint)
        set_flag('layer.vaultlocker.{}.ready'.format(storage_id))
        set_flag('layer.vaultlocker.{}.ready'.format(storage_name))
Example #22
def git_repo_path():
    return storage_get('location', storage_list('repo')[0])
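This one-liner assumes at least one 'repo' storage instance is attached; otherwise storage_list('repo')[0] raises IndexError. A slightly more defensive variant (a sketch, not from the original charm) could return None instead:

def git_repo_path():
    storage_ids = storage_list('repo')
    if not storage_ids:
        return None
    return storage_get('location', storage_ids[0])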
Example #23
    def install(self, zk_units=[], log_dir='logs'):
        '''
        Generates client-ssl.properties and server.properties with the current
        system state.
        '''
        zks = []
        for unit in zk_units or self.get_zks():
            ip = resolve_private_address(unit['host'])
            zks.append('%s:%s' % (ip, unit['port']))
        zks.sort()
        zk_connect = ','.join(zks)

        config = hookenv.config()

        broker_id = None
        storageids = hookenv.storage_list('logs')
        if storageids:
            mount = hookenv.storage_get('location', storageids[0])

            if mount:
                broker_path = os.path.join(log_dir, '.broker_id')

                if os.path.isfile(broker_path):
                    with open(broker_path, 'r') as f:
                        try:
                            broker_id = int(f.read().strip())
                        except ValueError:
                            hookenv.log('invalid broker id format')
                            hookenv.status_set(
                                'blocked',
                                'unable to validate broker id format')
                            raise

        if broker_id is None:
            hookenv.status_set('blocked', 'unable to get broker id')
            return

        context = {
            'broker_id': broker_id,
            'port': KAFKA_PORT,
            'zookeeper_connection_string': zk_connect,
            'log_dirs': log_dir,
            'keystore_password': keystore_password(),
            'ca_keystore': caKeystore(),
            'server_keystore': keystore('server'),
            'client_keystore': keystore('client'),
            'bind_addr': hookenv.unit_private_ip(),
            'auto_create_topics': config['auto_create_topics'],
            'default_partitions': config['default_partitions'],
            'default_replication_factor':
                config['default_replication_factor'],
            'inter_broker_protocol_version':
                config.get('inter_broker_protocol_version'),
            'log_message_format_version':
                config.get('log_message_format_version'),
        }

        render(source='client-ssl.properties',
               target=os.path.join(KAFKA_SNAP_DATA, 'client-ssl.properties'),
               owner='root',
               perms=0o400,
               context=context)

        render(source='server.properties',
               target=os.path.join(KAFKA_SNAP_DATA, 'server.properties'),
               owner='root',
               perms=0o644,
               context=context)

        render(source='broker.env',
               target=os.path.join(KAFKA_SNAP_DATA, 'broker.env'),
               owner='root',
               perms=0o644,
               context={
                   'kafka_heap_opts': config.get('kafka_heap_opts', ''),
               })

        render(
            source='override.conf',
            target=
            '/etc/systemd/system/snap.kafka.kafka.service.d/override.conf',
            owner='root',
            perms=0o644,
            context={},
        )
        check_call(['systemctl', 'daemon-reload'], universal_newlines=True)

        log4j_file = os.path.join(KAFKA_SNAP_DATA, 'log4j.properties')
        if config.get('log4j_properties'):
            with open(log4j_file, 'w') as f:
                print(config['log4j_properties'], file=f)
        elif os.path.exists(log4j_file):
            os.unlink(log4j_file)

        if log_dir:
            os.makedirs(log_dir, mode=0o700, exist_ok=True)
            shutil.chown(log_dir, user='******')

        self.restart()
Example #24
def attach():
    homedir = storage_get()['location']
    set_jenkins_dir(homedir)
Example #25
def repo_root():
    return storage_get('location', storage_list('repo-root')[0])
Example #26
def format_and_mount_storage():
    ''' This allows users to request persistent volumes from the cloud provider
    for the purposes of disaster recovery. '''
    set_state('data.volume.attached')
    # Query juju for the information about the block storage
    device_info = storage_get()
    block = device_info['location']
    bag = EtcdDatabag()
    bag.cluster = leader_get('cluster')
    # the databag has behavior that keeps the path updated.
    # Reference the default path from layer_options.
    etcd_opts = layer.options('etcd')
    # Split the tail of the path to mount the volume 1 level before
    # the data directory.
    tail = os.path.split(bag.etcd_data_dir)[0]

    if volume_is_mounted(block):
        hookenv.log('Device is already attached to the system.')
        hookenv.log('Refusing to take action against {}'.format(block))
        return

    # Format the device in non-interactive mode
    cmd = ['mkfs.ext4', device_info['location'], '-F']
    hookenv.log('Creating filesystem on {}'.format(device_info['location']))
    hookenv.log('With command: {}'.format(' '.join(cmd)))
    check_call(cmd)

    # halt etcd to perform the data-store migration
    host.service_stop(bag.etcd_daemon)

    os.makedirs(tail, exist_ok=True)
    mount_volume(block, tail)
    # handle first run during early-attach storage, pre-config-changed hook.
    os.makedirs(bag.etcd_data_dir, exist_ok=True)

    # Only attempt migration if directory exists
    if os.path.isdir(etcd_opts['etcd_data_dir']):
        migrate_path = "{}/".format(etcd_opts['etcd_data_dir'])
        output_path = "{}/".format(bag.etcd_data_dir)
        cmd = ['rsync', '-azp', migrate_path, output_path]

        hookenv.log('Detected existing data, migrating to new location.')
        hookenv.log('With command: {}'.format(' '.join(cmd)))

        check_call(cmd)

    with open('/etc/fstab', 'r') as fp:
        contents = fp.readlines()

    # scan fstab for the device
    found = any(block in line for line in contents)

    # if device not in fstab, append so it persists through reboots
    if not found:
        append = "{0} {1} ext4 defaults 0 0".format(block, tail)  # noqa
        with open('/etc/fstab', 'a') as fp:
            fp.writelines([append])

    # Finally re-render the configuration and resume operation
    render_config(bag)
    host.service_restart(bag.etcd_daemon)
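volume_is_mounted() and mount_volume() are helpers from the surrounding layer and are not shown here. As an assumption only (not the layer's actual code), a plausible stand-in for the mount check is to scan /proc/mounts for the device:

def volume_is_mounted(device):
    """Simplified check: true if the device appears as a mount source.
    Ignores symlinked device paths, which a real helper may resolve."""
    with open('/proc/mounts') as fp:
        return any(line.split()[0] == device for line in fp)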
Example #27
def setup_storage():
    storage = storage_get()
    chmod(path=storage.get('location'), mode=0o777)