Code Example #1
def ceph_changed():
    if 'ceph' not in CONFIGS.complete_contexts():
        log('ceph relation incomplete. Peer not ready?')
        return

    if not ensure_ceph_keyring(service=service_name(), user='******',
                               group='nova'):
        log('Could not create ceph keyring: peer not ready?')
        return

    CONFIGS.write(ceph_config_file())
    CONFIGS.write(CEPH_SECRET)
    CONFIGS.write(NOVA_CONF)

    # With some refactoring, this can move into NovaComputeCephContext
    # and be easily extended to support other compute flavors.
    if config('virt-type') in ['kvm', 'qemu', 'lxc'] and relation_get('key'):
        create_libvirt_secret(secret_file=CEPH_SECRET,
                              secret_uuid=CEPH_SECRET_UUID,
                              key=relation_get('key'))

    if (config('libvirt-image-backend') == 'rbd' and
            assert_libvirt_imagebackend_allowed()):
        if is_request_complete(get_ceph_request()):
            log('Request complete')
            # Ensure that nova-compute is restarted since only now can we
            # guarantee that ceph resources are ready.
            service_restart('nova-compute')
        else:
            send_request_if_needed(get_ceph_request())
Code Example #2
    def create_pool(self, name, replicas=3):
        """
        Request pool setup

        @param name: name of pool to create
        @param replicas: number of replicas for supporting pools
        """
        # json.dumps of the CephBrokerRq()
        requests = self.get_local(key='broker_reqs') or {}

        if name not in requests:
            rq = CephBrokerRq()
            rq.add_op_create_pool(name="{}".format(name),
                                  replica_count=replicas,
                                  weight=None)
            if not requests:
                requests = {}

            requests[name] = rq.request
            self.set_local(key='broker_reqs', value=requests)
            send_request_if_needed(rq, relation=self.relation_name)
            self.remove_state('{relation_name}.pools.available')
        else:
            rq = CephBrokerRq()
            try:
                j = json.loads(requests[name])
                rq.ops = j['ops']
                send_request_if_needed(rq, relation=self.relation_name)
            except ValueError as err:
                log("Unable to decode broker_req: {}.  Error: {}".format(
                    requests[name], err))
Code Example #3
def ceph_changed():
    if 'ceph' not in CONFIGS.complete_contexts():
        log('ceph relation incomplete. Peer not ready?')
        return

    service = service_name()
    if not ensure_ceph_keyring(service=service,
                               user='******', group='cinder'):
        log('Could not create ceph keyring: peer not ready?')
        return

    if is_request_complete(get_ceph_request()):
        log('Request complete')
        CONFIGS.write_all()
        set_ceph_env_variables(service=service)
        for rid in relation_ids('backup-backend'):
            backup_backend_joined(rid)

        # Ensure that cinder services are restarted since only now can we
        # guarantee that ceph resources are ready. Note that the order of
        # restart is important here.
        for svc in ['cinder-volume', 'cinder-backup']:
            service_restart(svc)

    else:
        send_request_if_needed(get_ceph_request())
Code Example #4
    def create_replicated_pool(self,
                               name,
                               replicas=3,
                               weight=None,
                               pg_num=None,
                               group=None,
                               namespace=None,
                               app_name=None,
                               max_bytes=None,
                               max_objects=None):
        """Request replicated pool setup.
        """
        # Ensure type of numeric values before sending over the wire
        replicas = int(replicas) if replicas else None
        weight = float(weight) if weight else None
        pg_num = int(pg_num) if pg_num else None
        max_bytes = int(max_bytes) if max_bytes else None
        max_objects = int(max_objects) if max_objects else None

        current_request = self.get_current_request()
        current_request.add_op_create_replicated_pool(name="{}".format(name),
                                                      replica_count=replicas,
                                                      pg_num=pg_num,
                                                      weight=weight,
                                                      group=group,
                                                      namespace=namespace,
                                                      app_name=app_name,
                                                      max_bytes=max_bytes,
                                                      max_objects=max_objects)
        ch_ceph.send_request_if_needed(current_request,
                                       relation=self.endpoint_name)
Code Example #5
    def create_erasure_pool(self,
                            name,
                            erasure_profile=None,
                            weight=None,
                            group=None,
                            app_name=None,
                            max_bytes=None,
                            max_objects=None):
        """Request erasure coded pool setup.
        """
        # Ensure type of numeric values before sending over the wire
        weight = float(weight) if weight else None
        max_bytes = int(max_bytes) if max_bytes else None
        max_objects = int(max_objects) if max_objects else None

        current_request = self.get_current_request()
        current_request.add_op_create_erasure_pool(
            name="{}".format(name),
            erasure_profile=erasure_profile,
            weight=weight,
            group=group,
            app_name=app_name,
            max_bytes=max_bytes,
            max_objects=max_objects)
        ch_ceph.send_request_if_needed(current_request,
                                       relation=self.endpoint_name)
Code Example #6
def ceph_changed():
    if 'ceph' not in CONFIGS.complete_contexts():
        log('ceph relation incomplete. Peer not ready?')
        return

    service = service_name()
    if not ensure_ceph_keyring(service=service, user='******', group='cinder'):
        log('Could not create ceph keyring: peer not ready?')
        return

    try:
        if is_request_complete(get_ceph_request()):
            log('Request complete')
            CONFIGS.write_all()
            for rid in relation_ids('storage-backend'):
                storage_backend(rid)
            for r_id in relation_ids('ceph-access'):
                ceph_access_joined(r_id)
            # Ensure that cinder-volume is restarted since only now can we
            # guarantee that ceph resources are ready.
            service_restart('cinder-volume')
        else:
            send_request_if_needed(get_ceph_request())
    except ValueError as e:
        # The end user has most likely provided an invalid value for a
        # configuration option. Just log the traceback here, the end user will
        # be notified by assess_status() called at the end of the hook
        # execution.
        log('Caught ValueError, invalid value provided for configuration?: '
            '"{}"'.format(str(e)),
            level=DEBUG)
Code Example #7
def ceph_changed(rid=None, unit=None):
    if 'ceph' not in CONFIGS.complete_contexts():
        log('ceph relation incomplete. Peer not ready?')
        return

    if not ensure_ceph_keyring(service=service_name(), user='******',
                               group='nova'):
        log('Could not create ceph keyring: peer not ready?')
        return

    CONFIGS.write(ceph_config_file())
    CONFIGS.write(CEPH_SECRET)
    CONFIGS.write(NOVA_CONF)

    # With some refactoring, this can move into NovaComputeCephContext
    # and be easily extended to support other compute flavors.
    key = relation_get(attribute='key', rid=rid, unit=unit)
    if config('virt-type') in ['kvm', 'qemu', 'lxc'] and key:
        create_libvirt_secret(secret_file=CEPH_SECRET,
                              secret_uuid=CEPH_SECRET_UUID, key=key)

    if is_request_complete(get_ceph_request()):
        log('Request complete')
        # Ensure that nova-compute is restarted since only now can we
        # guarantee that ceph resources are ready, but only if not paused.
        if (not is_unit_paused_set() and
                not is_broker_action_done('nova_compute_restart', rid,
                                          unit)):
            service_restart('nova-compute')
            mark_broker_action_done('nova_compute_restart', rid, unit)
    else:
        send_request_if_needed(get_ceph_request())
Code Example #8
def ceph_changed(rid=None, unit=None):
    if 'ceph' not in CONFIGS.complete_contexts():
        log('ceph relation incomplete. Peer not ready?')
        return

    if not ensure_ceph_keyring(
            service=service_name(), user='******', group='nova'):
        log('Could not create ceph keyring: peer not ready?')
        return

    CONFIGS.write(ceph_config_file())
    CONFIGS.write(CEPH_SECRET)
    CONFIGS.write(NOVA_CONF)

    # With some refactoring, this can move into NovaComputeCephContext
    # and be easily extended to support other compute flavors.
    key = relation_get(attribute='key', rid=rid, unit=unit)
    if config('virt-type') in ['kvm', 'qemu', 'lxc'] and key:
        create_libvirt_secret(secret_file=CEPH_SECRET,
                              secret_uuid=CEPH_SECRET_UUID,
                              key=key)

    if is_request_complete(get_ceph_request()):
        log('Request complete')
        # Ensure that nova-compute is restarted since only now can we
        # guarantee that ceph resources are ready, but only if not paused.
        if (not is_unit_paused_set() and
                not is_broker_action_done('nova_compute_restart', rid, unit)):
            service_restart('nova-compute')
            mark_broker_action_done('nova_compute_restart', rid, unit)
    else:
        send_request_if_needed(get_ceph_request())
Code Example #9
def mon_relation(rid=None, unit=None):
    rq = ceph.get_create_rgw_pools_rq(prefix=config('pool-prefix'))
    if is_request_complete(rq, relation='mon'):
        log('Broker request complete', level=DEBUG)
        CONFIGS.write_all()
        key = relation_get(attribute='radosgw_key', rid=rid, unit=unit)
        if key:
            ceph.import_radosgw_key(key)
            if not is_unit_paused_set():
                restart()  # TODO: figure out a better way to do this
    else:
        send_request_if_needed(rq, relation='mon')
Code Example #10
File: hooks.py  Project: openstack/charm-ceph-radosgw
    def _mon_relation():
        key_name = 'rgw.{}'.format(socket.gethostname())
        if request_per_unit_key():
            relation_set(relation_id=rid,
                         key_name=key_name)
        # NOTE: prefer zone name if in use over pool-prefix.
        rq = ceph.get_create_rgw_pools_rq(
            prefix=config('zone') or config('pool-prefix'))
        if is_request_complete(rq, relation='mon'):
            log('Broker request complete', level=DEBUG)
            CONFIGS.write_all()
            # New style per unit keys
            key = relation_get(attribute='{}_key'.format(key_name),
                               rid=rid, unit=unit)
            if not key:
                # Fallback to old style global key
                key = relation_get(attribute='radosgw_key',
                                   rid=rid, unit=unit)
                key_name = None

            if key:
                new_keyring = ceph.import_radosgw_key(key,
                                                      name=key_name)
                # NOTE(jamespage):
                # Deal with switch from radosgw init script to
                # systemd named units for radosgw instances by
                # stopping and disabling the radosgw unit
                if systemd_based_radosgw():
                    service_stop('radosgw')
                    service('disable', 'radosgw')
                    # Update the nrpe config. If we wait for the below
                    # to be called elsewhere, there exists a period
                    # where nagios will report the radosgw service as
                    # down, and also not be monitoring the per
                    # host services.
                    update_nrpe_config(checks_to_remove=['radosgw'])

                service('enable', service_name())
                # NOTE(jamespage):
                # Multi-site deployments need to defer restart as the
                # zone is not created until the master relation is
                # joined; restarting here will cause a restart burst
                # in systemd and stop the process restarting once
                # zone configuration is complete.
                if (not is_unit_paused_set() and
                        new_keyring and
                        not multisite_deployment()):
                    service_restart(service_name())

            process_multisite_relations()
        else:
            send_request_if_needed(rq, relation='mon')
Code Example #11
    def _mon_relation():
        key_name = 'rgw.{}'.format(socket.gethostname())
        if request_per_unit_key():
            relation_set(relation_id=rid, key_name=key_name)
        # NOTE: prefer zone name if in use over pool-prefix.
        rq = ceph.get_create_rgw_pools_rq(
            prefix=config('zone') or config('pool-prefix'))
        if is_request_complete(rq, relation='mon'):
            log('Broker request complete', level=DEBUG)
            CONFIGS.write_all()
            # New style per unit keys
            key = relation_get(attribute='{}_key'.format(key_name),
                               rid=rid,
                               unit=unit)
            if not key:
                # Fallback to old style global key
                key = relation_get(attribute='radosgw_key', rid=rid, unit=unit)
                key_name = None

            if key:
                new_keyring = ceph.import_radosgw_key(key, name=key_name)
                # NOTE(jamespage):
                # Deal with switch from radosgw init script to
                # systemd named units for radosgw instances by
                # stopping and disabling the radosgw unit
                if systemd_based_radosgw():
                    service_stop('radosgw')
                    service('disable', 'radosgw')
                    # Update the nrpe config. If we wait for the below
                    # to be called elsewhere, there exists a period
                    # where nagios will report the radosgw service as
                    # down, and also not be monitoring the per
                    # host services.
                    update_nrpe_config(checks_to_remove=['radosgw'])

                # NOTE(jamespage):
                # Multi-site deployments need to defer restart as the
                # zone is not created until the master relation is
                # joined; restarting here will cause a restart burst
                # in systemd and stop the process restarting once
                # zone configuration is complete.
                if (not is_unit_paused_set() and new_keyring
                        and not multisite_deployment()):
                    log('Resume service "{}" as we now have keys for it.'.
                        format(service_name()),
                        level=DEBUG)
                    service_resume(service_name())

            process_multisite_relations()
        else:
            send_request_if_needed(rq, relation='mon')
Code Example #12
    def maybe_send_rq(self, rq):
        """Send single broker request with all operations if needed.

        The rbd-mirror charm has two endpoints using this interface connected
        to the ceph-mon in the local and remote clusters. Subsequently each
        relation typically only has one other participant, the ceph-mon.

        The charm will receive a verbatim copy of every broker request
        the ceph-mon knows about on one end, then extract and filter all
        the operations and collapse them into one new single broker
        request that is maintained with the ceph-mon on the other end.

        :param rq: Broker Request to evaluate for sending.
        :type rq: ch_ceph.CephBrokerRq
        """
        for relation in self.relations:
            ch_ceph.send_request_if_needed(rq, relation=self.endpoint_name)
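A rough, hedged usage sketch for the maybe_send_rq() interface above (not part of the project source): a reactive handler in a consuming charm could build a broker request and hand it over as follows. The flag name 'ceph-local.connected', the handler name and the pool name are hypothetical.

import charms.reactive as reactive
import charmhelpers.contrib.storage.linux.ceph as ch_ceph


@reactive.when('ceph-local.connected')  # hypothetical flag name
def request_mirror_pools():
    # Build a broker request and let the interface decide whether it
    # actually needs to be (re)sent to the ceph-mon on this relation.
    endpoint = reactive.endpoint_from_flag('ceph-local.connected')
    rq = ch_ceph.CephBrokerRq()
    rq.add_op_create_replicated_pool(name='cinder-ceph', replica_count=3)
    endpoint.maybe_send_rq(rq)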
Code Example #13
    def request_ceph_permissions(self, ceph):
        rq = CephBrokerRq()

        json_rq = ceph.get_local(key='broker_req')
        if json_rq:
            try:
                j = json.loads(json_rq)
                log("Json request: {}".format(json_rq))
                rq.set_ops(j['ops'])
            except ValueError as err:
                log("Unable to decode broker_req: {}. Error {}".format(
                    json_rq, err))

        rq.add_op({'op': 'set-key-permissions',
                   'permissions': CEPH_CAPABILITIES,
                   'client': 'manila-ganesha'})
        ceph.set_local(key='broker_req', value=rq.request)
        send_request_if_needed(rq, relation='ceph')
Code Example #14
def ceph_changed():
    if 'ceph' not in CONFIGS.complete_contexts():
        juju_log('ceph relation incomplete. Peer not ready?')
        return

    service = service_name()
    if not ensure_ceph_keyring(service=service, user='******', group='glance'):
        juju_log('Could not create ceph keyring: peer not ready?')
        return

    if is_request_complete(get_ceph_request()):
        juju_log('Request complete')
        CONFIGS.write(GLANCE_API_CONF)
        CONFIGS.write(ceph_config_file())
        # Ensure that glance-api is restarted since only now can we
        # guarantee that ceph resources are ready.
        service_restart('glance-api')
    else:
        send_request_if_needed(get_ceph_request())
Code Example #15
    def create_replicated_pool(self,
                               name,
                               replicas=3,
                               weight=None,
                               pg_num=None,
                               group=None,
                               namespace=None,
                               app_name=None,
                               max_bytes=None,
                               max_objects=None):
        """Request replicated pool setup.

        Refer to charm-helpers ``add_op_create_replicated_pool`` function for
        documentation of parameters.
        """
        # Ensure type of numeric values before sending over the wire
        replicas = int(replicas) if replicas else None
        weight = float(weight) if weight else None
        pg_num = int(pg_num) if pg_num else None
        max_bytes = int(max_bytes) if max_bytes else None
        max_objects = int(max_objects) if max_objects else None

        for relation in self.relations:
            current_request = ch_ceph.get_previous_request(
                relation.relation_id) or ch_ceph.CephBrokerRq()
            for req in current_request.ops:
                if 'op' in req and 'name' in req:
                    if req['op'] == 'create-pool' and req['name'] == name:
                        # request already exists, don't create a new one
                        return
            current_request.add_op_create_replicated_pool(
                name="{}".format(name),
                replica_count=replicas,
                pg_num=pg_num,
                weight=weight,
                group=group,
                namespace=namespace,
                app_name=app_name,
                max_bytes=max_bytes,
                max_objects=max_objects)
            ch_ceph.send_request_if_needed(current_request,
                                           relation=self.endpoint_name)
Code Example #16
def ceph_changed():
    if 'ceph' not in CONFIGS.complete_contexts():
        juju_log('ceph relation incomplete. Peer not ready?')
        return

    service = service_name()
    if not ensure_ceph_keyring(service=service,
                               user='******', group='glance'):
        juju_log('Could not create ceph keyring: peer not ready?')
        return

    if is_request_complete(get_ceph_request()):
        juju_log('Request complete')
        CONFIGS.write(GLANCE_API_CONF)
        CONFIGS.write(ceph_config_file())
        # Ensure that glance-api is restarted since only now can we
        # guarantee that ceph resources are ready.
        service_restart('glance-api')
    else:
        send_request_if_needed(get_ceph_request())
Code Example #17
File: cinder_hooks.py  Project: phausman/charm-cinder
def ceph_changed(relation_id=None):
    if 'ceph' not in CONFIGS.complete_contexts():
        juju_log('ceph relation incomplete. Peer not ready?')
        return

    service = service_name()
    if not ensure_ceph_keyring(service=service, user='******', group='cinder'):
        juju_log('Could not create ceph keyring: peer not ready?')
        return

    if is_request_complete(get_ceph_request()):
        log('Request complete')
        CONFIGS.write(CINDER_CONF)
        CONFIGS.write(ceph_config_file())
        # Ensure that cinder-volume is restarted since only now can we
        # guarantee that ceph resources are ready.
        if not is_unit_paused_set():
            service_restart('cinder-volume')
    else:
        send_request_if_needed(get_ceph_request())
Code Example #18
File: cinder_hooks.py  Project: dosaboy/charm-cinder
def ceph_changed(relation_id=None):
    if 'ceph' not in CONFIGS.complete_contexts():
        juju_log('ceph relation incomplete. Peer not ready?')
        return

    service = service_name()
    if not ensure_ceph_keyring(service=service,
                               user='******', group='cinder'):
        juju_log('Could not create ceph keyring: peer not ready?')
        return

    if is_request_complete(get_ceph_request()):
        log('Request complete')
        set_ceph_env_variables(service=service)
        CONFIGS.write(CINDER_CONF)
        CONFIGS.write(ceph_config_file())
        # Ensure that cinder-volume is restarted since only now can we
        # guarantee that ceph resources are ready.
        service_restart('cinder-volume')
    else:
        send_request_if_needed(get_ceph_request())
Code Example #19
def ceph_changed():
    if 'ceph' not in CONFIGS.complete_contexts():
        log('ceph relation incomplete. Peer not ready?')
        return

    service = service_name()
    if not ensure_ceph_keyring(service=service, user='******', group='cinder'):
        log('Could not create ceph keyring: peer not ready?')
        return

    if is_request_complete(get_ceph_request()):
        log('Request complete')
        CONFIGS.write_all()
        for rid in relation_ids('storage-backend'):
            storage_backend(rid)
        for r_id in relation_ids('ceph-access'):
            ceph_access_joined(r_id)
        # Ensure that cinder-volume is restarted since only now can we
        # guarantee that ceph resources are ready.
        service_restart('cinder-volume')
    else:
        send_request_if_needed(get_ceph_request())
Code Example #20
def ceph_changed():
    if 'ceph' not in CONFIGS.complete_contexts():
        log('ceph relation incomplete. Peer not ready?')
        return

    service = service_name()
    if not ensure_ceph_keyring(service=service,
                               user='******', group='cinder'):
        log('Could not create ceph keyring: peer not ready?')
        return

    if is_request_complete(get_ceph_request()):
        log('Request complete')
        CONFIGS.write_all()
        for rid in relation_ids('storage-backend'):
            storage_backend(rid)
        for r_id in relation_ids('ceph-access'):
            ceph_access_joined(r_id)
        # Ensure that cinder-volume is restarted since only now can we
        # guarantee that ceph resources are ready.
        service_restart('cinder-volume')
    else:
        send_request_if_needed(get_ceph_request())
Code Example #21
    def initialize_mds(self, name, replicas=3):
        """
        Request pool setup and mds creation

        @param name: name of mds pools to create
        @param replicas: number of replicas for supporting pools
        """
        # json.dumps of the CephBrokerRq()
        json_rq = self.get_local(key='broker_req')

        if not json_rq:
            rq = CephBrokerRq()
            rq.add_op_create_pool(name="{}_data".format(name),
                                  replica_count=replicas,
                                  weight=None)
            rq.add_op_create_pool(name="{}_metadata".format(name),
                                  replica_count=replicas,
                                  weight=None)
            # Create CephFS
            rq.ops.append({
                'op': 'create-cephfs',
                'mds_name': name,
                'data_pool': "{}_data".format(name),
                'metadata_pool': "{}_metadata".format(name),
            })
            self.set_local(key='broker_req', value=rq.request)
            send_request_if_needed(rq, relation=self.relation_name)
        else:
            rq = CephBrokerRq()
            try:
                j = json.loads(json_rq)
                log("Json request: {}".format(json_rq))
                rq.ops = j['ops']
                send_request_if_needed(rq, relation=self.relation_name)
            except ValueError as err:
                log("Unable to decode broker_req: {}.  Error: {}".format(
                    json_rq, err))
Code Example #22
    def create_pools(self, names, replicas=3):
        """
        Request pools setup

        @param names: list of pool names to create
        @param replicas: number of replicas for supporting pools
        """
        # json.dumps of the CephBrokerRq()
        requests = self.get_local(key='broker_reqs') or {}

        new_names = [name for name in names if name not in requests]

        # existing names get ignored here
        # new names get added to a single request
        if new_names:
            rq = CephBrokerRq()
            for name in new_names:
                rq.add_op_create_pool(name="{}".format(name),
                                      replica_count=replicas,
                                      weight=None)
                requests[name] = rq.request
            self.set_local(key='broker_reqs', value=requests)
            send_request_if_needed(rq, relation=self.relation_name)
            self.remove_state('{relation_name}.pools.available')
Code Example #23
File: add_disk.py  Project: MartinHell/charm-ceph-osd
        ceph.tune_dev(dev)
    # Wrap in list() so the result can be tested and indexed on Python 3.
    mounts = list(filter(lambda disk: device_path
                         in disk.device, psutil.disk_partitions()))
    if mounts:
        osd = mounts[0]
        osd_id = osd.mountpoint.split('/')[-1].split('-')[-1]
        request.ops.append({
            'op': 'move-osd-to-bucket',
            'osd': "osd.{}".format(osd_id),
            'bucket': bucket})
    return request


def get_devices():
    devices = []
    for path in action_get('osd-devices').split(' '):
        path = path.strip()
        if os.path.isabs(path):
            devices.append(path)

    return devices


if __name__ == "__main__":
    request = CephBrokerRq()
    for dev in get_devices():
        request = add_device(request=request,
                             device_path=dev,
                             bucket=action_get("bucket"))
    send_request_if_needed(request, relation='mon')
Code Example #24
    def _mon_relation():
        key_name = 'rgw.{}'.format(socket.gethostname())
        legacy = True
        if request_per_unit_key():
            legacy = False
            relation_set(relation_id=rid, key_name=key_name)
        try:
            rq = ceph.get_create_rgw_pools_rq(
                prefix=config('zone') or config('pool-prefix'))
        except ValueError as e:
            # The end user has most likely provided an invalid value for
            # a configuration option. Just log the traceback here, the
            # end user will be notified by assess_status() called at
            # the end of the hook execution.
            log('Caught ValueError, invalid value provided for '
                'configuration?: "{}"'.format(str(e)),
                level=DEBUG)
            return

        if is_request_complete(rq, relation='mon'):
            log('Broker request complete', level=DEBUG)
            CONFIGS.write_all()
            # New style per unit keys
            key = relation_get(attribute='{}_key'.format(key_name),
                               rid=rid,
                               unit=unit)
            if not key:
                # Fallback to old style global key
                key = relation_get(attribute='radosgw_key', rid=rid, unit=unit)
                key_name = None

            if key:
                new_keyring = ceph.import_radosgw_key(key, name=key_name)
                # NOTE(jamespage):
                # Deal with switch from radosgw init script to
                # systemd named units for radosgw instances by
                # stopping and disabling the radosgw unit
                if systemd_based_radosgw():
                    service_stop('radosgw')
                    service('disable', 'radosgw')
                    # Update the nrpe config. If we wait for the below
                    # to be called elsewhere, there exists a period
                    # where nagios will report the radosgw service as
                    # down, and also not be monitoring the per
                    # host services.
                    update_nrpe_config(checks_to_remove=['radosgw'])

                # NOTE(jamespage):
                # Multi-site deployments need to defer restart as the
                # zone is not created until the master relation is
                # joined; restarting here will cause a restart burst
                # in systemd and stop the process restarting once
                # zone configuration is complete.
                if (not is_unit_paused_set() and new_keyring
                        and not multisite_deployment()):
                    log('Resume service "{}" as we now have keys for it.'.
                        format(service_name()),
                        level=DEBUG)
                    service_resume(service_name())

            if multisite_deployment():
                process_multisite_relations()
            elif (ready_for_service(legacy=legacy) and is_leader()
                  and 'mon' in CONFIGS.complete_contexts()):
                # In a non multi-site deployment create the
                # zone using the default zonegroup and restart the service
                internal_url = '{}:{}'.format(
                    canonical_url(CONFIGS, INTERNAL),
                    listen_port(),
                )
                endpoints = [internal_url]
                zonegroup = 'default'
                zone = config('zone')
                existing_zones = multisite.list_zones()
                log('Existing zones {}'.format(existing_zones), level=DEBUG)
                if zone not in existing_zones:
                    log("Zone '{}' doesn't exist, creating".format(zone))
                    try:
                        multisite.create_zone(zone,
                                              endpoints=endpoints,
                                              default=True,
                                              master=True,
                                              zonegroup=zonegroup)
                    except subprocess.CalledProcessError as e:
                        if 'File exists' in e.stderr.decode('UTF-8'):
                            # NOTE(lourot): may have been created in the
                            # background by the Rados Gateway daemon, see
                            # lp:1856106
                            log("Zone '{}' existed already after all".format(
                                zone))
                        else:
                            raise

                    existing_zones = multisite.list_zones(retry_on_empty=True)
                    log('Existing zones {}'.format(existing_zones),
                        level=DEBUG)
                    if zone not in existing_zones:
                        raise RuntimeError(
                            "Could not create zone '{}'".format(zone))

                    service_restart(service_name())
        else:
            send_request_if_needed(rq, relation='mon')
Code Example #25
File: add_disk.py  Project: xavpaice/charm-ceph-osd
    if hookenv.config('autotune'):
        ceph.utils.tune_dev(dev)
    mounts = filter(lambda disk: device_path
                    in disk.device, psutil.disk_partitions())
    for osd in mounts:
        osd_id = osd.mountpoint.split('/')[-1].split('-')[-1]
        request.ops.append({
            'op': 'move-osd-to-bucket',
            'osd': "osd.{}".format(osd_id),
            'bucket': bucket})
    return request


def get_devices():
    devices = []
    for path in hookenv.action_get('osd-devices').split(' '):
        path = path.strip()
        if os.path.isabs(path):
            devices.append(path)

    return devices


if __name__ == "__main__":
    request = ch_ceph.CephBrokerRq()
    for dev in get_devices():
        request = add_device(request=request,
                             device_path=dev,
                             bucket=hookenv.action_get("bucket"))
    ch_ceph.send_request_if_needed(request, relation='mon')