def mon_relation():
    """Handle the mon relation: bootstrap the MON cluster once quorum
    count is reachable.

    Waits until the leader has published the shared monitor secret,
    regenerates ceph.conf, and when enough monitor peers are visible,
    bootstraps the cluster and notifies OSD/radosgw/client relations.
    """
    # BUG FIX: previously this function called
    # ceph.bootstrap_monitor_cluster(leader_get('monitor-secret'))
    # without checking that the leader had set the secret yet, so a
    # fast-joining peer could bootstrap with None.  Guard matches the
    # other mon_relation variants in this file.
    if leader_get('monitor-secret') is None:
        log('still waiting for leader to setup keys')
        status_set('waiting', 'Waiting for leader to setup keys')
        return
    emit_cephconf()
    moncount = int(config('monitor-count'))
    if len(get_mon_hosts()) >= moncount:
        ceph.bootstrap_monitor_cluster(leader_get('monitor-secret'))
        ceph.wait_for_bootstrap()
        notify_osds()
        notify_radosgws()
        notify_client()
        status_set("active", "Ceph Monitor quorum is ready")
    else:
        log('Not enough mons ({}), punting.'
            .format(len(get_mon_hosts())))
def mon_relation():
    """Handle the mon relation: bootstrap the MON cluster and optionally
    customise the CRUSH failure domain.

    Waits for the leader-published monitor secret, regenerates ceph.conf,
    bootstraps once enough monitor peers are present, then (leader only,
    when `customize-failure-domain` is set and Juju exposes an
    availability zone) rewrites the CRUSH map to use rack-level
    chooseleaf, and finally notifies related units.
    """
    if leader_get('monitor-secret') is None:
        log('still waiting for leader to setup keys')
        status_set('waiting', 'Waiting for leader to setup keys')
        return
    emit_cephconf()
    moncount = int(config('monitor-count'))
    if len(get_mon_hosts()) >= moncount:
        status_set('maintenance', 'Bootstrapping MON cluster')
        ceph.bootstrap_monitor_cluster(leader_get('monitor-secret'))
        ceph.wait_for_bootstrap()
        ceph.wait_for_quorum()
        # If we can and want to
        if is_leader() and config('customize-failure-domain'):
            # But only if the environment supports it
            if os.environ.get('JUJU_AVAILABILITY_ZONE'):
                cmds = [
                    "ceph osd getcrushmap -o /tmp/crush.map",
                    "crushtool -d /tmp/crush.map| "
                    "sed 's/step chooseleaf firstn 0 type host/step "
                    "chooseleaf firstn 0 type rack/' > "
                    "/tmp/crush.decompiled",
                    "crushtool -c /tmp/crush.decompiled -o /tmp/crush.map",
                    "crushtool -i /tmp/crush.map --test",
                    "ceph osd setcrushmap -i /tmp/crush.map"
                ]
                for cmd in cmds:
                    try:
                        # BUG FIX: check_call never captures output, so
                        # CalledProcessError.output was always None in the
                        # log below; check_output with stderr folded in
                        # records what actually went wrong.
                        subprocess.check_output(cmd, shell=True,
                                                stderr=subprocess.STDOUT)
                    except subprocess.CalledProcessError as e:
                        log("Failed to modify crush map:", level='error')
                        log("Cmd: {}".format(cmd), level='error')
                        log("Error: {}".format(e.output), level='error')
                        break
            else:
                # BUG FIX: the adjacent string literals previously joined
                # with no separator, logging "doesn'thave support".
                log(
                    "Your Juju environment doesn't "
                    "have support for Availability Zones"
                )
        notify_osds()
        notify_radosgws()
        notify_client()
    else:
        log('Not enough mons ({}), punting.'
            .format(len(get_mon_hosts())))
def config_changed():
    """Handle the config-changed hook.

    Validates IPv6 support if requested, checks for a requested upgrade,
    applies sysctl settings, refreshes NRPE checks, has the leader create
    (or adopt operator-supplied) cluster fsid and monitor secret,
    regenerates ceph.conf, bootstraps a single-node MON cluster when
    `monitor-count` is 1, and installs the AppArmor profile.
    """
    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()

    # Check if an upgrade was requested
    check_for_upgrade()

    log('Monitor hosts are ' + repr(get_mon_hosts()))

    sysctl_dict = config('sysctl')
    if sysctl_dict:
        create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-charm.conf')
    if relations_of_type('nrpe-external-master'):
        update_nrpe_config()

    if is_leader():
        if not leader_get('fsid') or not leader_get('monitor-secret'):
            # Honour operator-supplied values; otherwise generate them.
            if config('fsid'):
                fsid = config('fsid')
            else:
                fsid = "{}".format(uuid.uuid1())
            if config('monitor-secret'):
                mon_secret = config('monitor-secret')
            else:
                mon_secret = "{}".format(ceph.generate_monitor_secret())
            status_set('maintenance', 'Creating FSID and Monitor Secret')
            opts = {
                'fsid': fsid,
                'monitor-secret': mon_secret,
            }
            log("Settings for the cluster are: {}".format(opts))
            leader_set(opts)
    else:
        # Non-leaders cannot proceed until the leader has published keys.
        if leader_get('fsid') is None or leader_get('monitor-secret') is None:
            log('still waiting for leader to setup keys')
            status_set('waiting', 'Waiting for leader to setup keys')
            sys.exit(0)

    emit_cephconf()

    # Support use of single node ceph
    if not ceph.is_bootstrapped() and int(config('monitor-count')) == 1:
        status_set('maintenance', 'Bootstrapping single Ceph MON')
        # BUG FIX: bootstrap with the leader-managed secret.  The old code
        # read config('monitor-secret'), which is empty whenever the secret
        # was auto-generated in the leader branch above; leader_get() is
        # guaranteed non-None here by both branches above.
        ceph.bootstrap_monitor_cluster(leader_get('monitor-secret'))
        ceph.wait_for_bootstrap()
    install_apparmor_profile()
def config_changed():
    """Handle config-changed: validate mandatory options, write ceph.conf,
    prepare the journal device, bootstrap a single-node MON when
    `monitor-count` is 1, then process storage and NRPE relations.
    """
    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()

    log('Monitor hosts are ' + repr(get_mon_hosts()))

    # Pre-flight checks: these options are mandatory, so bail out early
    # when any of them is missing or invalid.
    if not config('fsid'):
        log('No fsid supplied, cannot proceed.', level=ERROR)
        sys.exit(1)
    if not config('monitor-secret'):
        log('No monitor-secret supplied, cannot proceed.', level=ERROR)
        sys.exit(1)
    if config('osd-format') not in ceph.DISK_FORMATS:
        log('Invalid OSD disk format configuration specified', level=ERROR)
        sys.exit(1)

    sysctl_settings = config('sysctl')
    if sysctl_settings:
        create_sysctl(sysctl_settings, '/etc/sysctl.d/50-ceph-charm.conf')

    emit_cephconf()

    # Release any ephemeral mountpoint the operator asked us to reclaim.
    ephemeral_mount = config('ephemeral-unmount')
    if ephemeral_mount and ceph.filesystem_mounted(ephemeral_mount):
        umount(ephemeral_mount)

    # Zap the journal device exactly once; a marker file records that it
    # has already been done.
    journal = get_osd_journal()
    if (journal and not os.path.exists(JOURNAL_ZAPPED)
            and os.path.exists(journal)):
        ceph.zap_disk(journal)
        with open(JOURNAL_ZAPPED, 'w') as marker:
            marker.write('DONE')

    # Support use of single node ceph
    if not ceph.is_bootstrapped() and int(config('monitor-count')) == 1:
        status_set('maintenance', 'Bootstrapping single Ceph MON')
        ceph.bootstrap_monitor_cluster(config('monitor-secret'))
        ceph.wait_for_bootstrap()

    storage_changed()

    if relations_of_type('nrpe-external-master'):
        update_nrpe_config()
def mon_relation():
    """Handle the mon relation: once the leader has published the monitor
    secret and enough monitor peers are visible, bootstrap the MON
    cluster, wait for quorum and notify related units.
    """
    # Nothing to do until the leader has generated the shared secret.
    if leader_get('monitor-secret') is None:
        log('still waiting for leader to setup keys')
        status_set('waiting', 'Waiting for leader to setup keys')
        return

    emit_cephconf()

    required = int(config('monitor-count'))
    if len(get_mon_hosts()) < required:
        log('Not enough mons ({}), punting.'
            .format(len(get_mon_hosts())))
        return

    status_set('maintenance', 'Bootstrapping MON cluster')
    ceph.bootstrap_monitor_cluster(leader_get('monitor-secret'))
    ceph.wait_for_bootstrap()
    ceph.wait_for_quorum()
    notify_osds()
    notify_radosgws()
    notify_client()
def mon_relation():
    """Handle the mon relation: once enough monitor peers have joined,
    bootstrap the MON cluster, bring up the local OSD devices, wait for
    quorum and notify related units.
    """
    emit_cephconf()

    required = int(config('monitor-count'))
    if len(get_mon_hosts()) < required:
        log('Not enough mons ({}), punting.'
            .format(len(get_mon_hosts())))
        return

    status_set('maintenance', 'Bootstrapping MON cluster')
    ceph.bootstrap_monitor_cluster(config('monitor-secret'))
    ceph.wait_for_bootstrap()
    # Initialise every configured device as an OSD, then start them all.
    for device in get_devices():
        ceph.osdize(device, config('osd-format'), get_osd_journal(),
                    reformat_osd(), config('ignore-device-errors'))
    ceph.start_osds(get_devices())
    ceph.wait_for_quorum()
    notify_osds()
    notify_radosgws()
    notify_client()
def mon_relation():
    """Handle the mon relation: bootstrap the MON cluster and optionally
    customise the CRUSH failure domain.

    Waits for the leader-published monitor secret, regenerates ceph.conf,
    bootstraps once enough monitor peers are present, then (leader only,
    when `customize-failure-domain` is set and Juju exposes an
    availability zone) rewrites the CRUSH map to use rack-level
    chooseleaf, and finally notifies related units.
    """
    if leader_get('monitor-secret') is None:
        log('still waiting for leader to setup keys')
        status_set('waiting', 'Waiting for leader to setup keys')
        return
    emit_cephconf()
    moncount = int(config('monitor-count'))
    if len(get_mon_hosts()) >= moncount:
        status_set('maintenance', 'Bootstrapping MON cluster')
        ceph.bootstrap_monitor_cluster(leader_get('monitor-secret'))
        ceph.wait_for_bootstrap()
        ceph.wait_for_quorum()
        # If we can and want to
        if is_leader() and config('customize-failure-domain'):
            # But only if the environment supports it
            if os.environ.get('JUJU_AVAILABILITY_ZONE'):
                cmds = [
                    "ceph osd getcrushmap -o /tmp/crush.map",
                    "crushtool -d /tmp/crush.map| "
                    "sed 's/step chooseleaf firstn 0 type host/step "
                    "chooseleaf firstn 0 type rack/' > "
                    "/tmp/crush.decompiled",
                    "crushtool -c /tmp/crush.decompiled -o /tmp/crush.map",
                    "crushtool -i /tmp/crush.map --test",
                    "ceph osd setcrushmap -i /tmp/crush.map"
                ]
                for cmd in cmds:
                    try:
                        # BUG FIX: check_call never captures output, so
                        # CalledProcessError.output was always None in the
                        # log below; check_output with stderr folded in
                        # records what actually went wrong.
                        subprocess.check_output(cmd, shell=True,
                                                stderr=subprocess.STDOUT)
                    except subprocess.CalledProcessError as e:
                        log("Failed to modify crush map:", level='error')
                        log("Cmd: {}".format(cmd), level='error')
                        log("Error: {}".format(e.output), level='error')
                        break
            else:
                # BUG FIX: the adjacent string literals previously joined
                # with no separator, logging "doesn'thave support".
                log("Your Juju environment doesn't "
                    "have support for Availability Zones")
        notify_osds()
        notify_radosgws()
        notify_client()
    else:
        log('Not enough mons ({}), punting.'.format(len(get_mon_hosts())))
def config_changed():
    """Handle the config-changed hook.

    Validates IPv6 support if requested, applies sysctl settings,
    refreshes NRPE checks, has the leader create (or adopt
    operator-supplied) cluster fsid and monitor secret, regenerates
    ceph.conf, and bootstraps a single-node MON cluster when
    `monitor-count` is 1.
    """
    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()

    log('Monitor hosts are ' + repr(get_mon_hosts()))

    sysctl_dict = config('sysctl')
    if sysctl_dict:
        create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-charm.conf')
    if relations_of_type('nrpe-external-master'):
        update_nrpe_config()

    if is_leader():
        if not leader_get('fsid') or not leader_get('monitor-secret'):
            # Honour operator-supplied values; otherwise generate them.
            if config('fsid'):
                fsid = config('fsid')
            else:
                fsid = "{}".format(uuid.uuid1())
            if config('monitor-secret'):
                mon_secret = config('monitor-secret')
            else:
                mon_secret = "{}".format(ceph.generate_monitor_secret())
            status_set('maintenance', 'Creating FSID and Monitor Secret')
            opts = {
                'fsid': fsid,
                'monitor-secret': mon_secret,
            }
            log("Settings for the cluster are: {}".format(opts))
            leader_set(opts)
    else:
        # Non-leaders cannot proceed until the leader has published keys.
        if leader_get('fsid') is None or leader_get('monitor-secret') is None:
            log('still waiting for leader to setup keys')
            status_set('waiting', 'Waiting for leader to setup keys')
            sys.exit(0)

    emit_cephconf()

    # Support use of single node ceph
    if (not ceph.is_bootstrapped() and int(config('monitor-count')) == 1):
        status_set('maintenance', 'Bootstrapping single Ceph MON')
        # BUG FIX: bootstrap with the leader-managed secret.  The old code
        # read config('monitor-secret'), which is empty whenever the secret
        # was auto-generated in the leader branch above; leader_get() is
        # guaranteed non-None here by both branches above.
        ceph.bootstrap_monitor_cluster(leader_get('monitor-secret'))
        ceph.wait_for_bootstrap()
def mon_relation():
    """Handle the mon relation: once enough monitor peers are present,
    bootstrap the MON cluster, set up the local OSD devices and notify
    related units.
    """
    log('Begin mon-relation hook.')
    emit_cephconf()

    required = int(config('monitor-count'))
    if len(get_mon_hosts()) < required:
        log('Not enough mons ({}), punting.'
            .format(len(get_mon_hosts())))
    else:
        ceph.bootstrap_monitor_cluster(config('monitor-secret'))
        ceph.wait_for_bootstrap()
        # Initialise every configured device as an OSD, then start them.
        for device in get_devices():
            ceph.osdize(device, config('osd-format'),
                        config('osd-journal'), reformat_osd())
        ceph.start_osds(get_devices())
        notify_osds()
        notify_radosgws()
        notify_client()

    log('End mon-relation hook.')
def config_changed():
    """Handle config-changed: validate mandatory options, write ceph.conf,
    prepare the journal device, bootstrap a single-node MON when
    `monitor-count` is 1, and set up OSDs once bootstrapped.
    """
    log('Begin config-changed hook.')
    log('Monitor hosts are ' + repr(get_mon_hosts()))

    # Pre-flight checks: refuse to run with incomplete configuration.
    if not config('fsid'):
        log('No fsid supplied, cannot proceed.', level=ERROR)
        sys.exit(1)
    if not config('monitor-secret'):
        log('No monitor-secret supplied, cannot proceed.', level=ERROR)
        sys.exit(1)
    if config('osd-format') not in ceph.DISK_FORMATS:
        log('Invalid OSD disk format configuration specified', level=ERROR)
        sys.exit(1)

    emit_cephconf()

    # Release any ephemeral mountpoint the operator asked us to reclaim.
    mountpoint = config('ephemeral-unmount')
    if mountpoint and ceph.filesystem_mounted(mountpoint):
        umount(mountpoint)

    # Zap the journal device exactly once; a marker file records that it
    # has already been done.
    journal = config('osd-journal')
    if (journal and not os.path.exists(JOURNAL_ZAPPED)
            and os.path.exists(journal)):
        ceph.zap_disk(journal)
        with open(JOURNAL_ZAPPED, 'w') as marker:
            marker.write('DONE')

    # Support use of single node ceph
    if not ceph.is_bootstrapped() and int(config('monitor-count')) == 1:
        ceph.bootstrap_monitor_cluster(config('monitor-secret'))
        ceph.wait_for_bootstrap()

    if ceph.is_bootstrapped():
        for device in get_devices():
            ceph.osdize(device, config('osd-format'),
                        config('osd-journal'), reformat_osd())
        ceph.start_osds(get_devices())

    log('End config-changed hook.')