def check_for_upgrade():
    if not os.path.exists(_upgrade_keyring):
        log("Ceph upgrade keyring not detected, skipping upgrade checks.")
        return

    c = hookenv.config()
    old_version = ceph.resolve_ceph_version(c.previous('source') or 'distro')
    log('old_version: {}'.format(old_version))
    new_version = ceph.resolve_ceph_version(hookenv.config('source') or
                                            'distro')
    log('new_version: {}'.format(new_version))

    old_version_os = get_os_codename_install_source(c.previous('source') or
                                                    'distro')
    new_version_os = get_os_codename_install_source(hookenv.config('source'))

    # We may be resuming a previous upgrade that failed if the directories
    # still need an ownership update. Check this condition.
    resuming_upgrade = ceph.dirs_need_ownership_update('osd')

    if (ceph.UPGRADE_PATHS.get(old_version) == new_version) or \
            resuming_upgrade:
        if old_version == new_version:
            log('Attempting to resume possibly failed upgrade.', INFO)
        else:
            log("{} to {} is a valid upgrade path. Proceeding.".format(
                old_version, new_version))

        emit_cephconf(upgrading=True)
        ceph.roll_osd_cluster(new_version=new_version,
                              upgrade_key='osd-upgrade')
        emit_cephconf(upgrading=False)
        notify_mon_of_upgrade(new_version)

    elif (old_version == new_version and
          old_version_os < new_version_os):
        # See LP: #1778823
        add_source(hookenv.config('source'), hookenv.config('key'))
        log(("The installation source has changed yet there is no new major "
             "version of Ceph in this new source. As a result no package "
             "upgrade will take effect. Please upgrade manually if you need "
             "to."), level=INFO)
    else:
        # Log a helpful error message
        log("Invalid upgrade path from {} to {}. "
            "Valid paths are: {}".format(old_version, new_version,
                                         ceph.pretty_print_upgrade_paths()),
            level=ERROR)
def check_for_upgrade():
    if not os.path.exists(ceph._upgrade_keyring):
        log("Ceph upgrade keyring not detected, skipping upgrade checks.")
        return

    c = hookenv.config()
    old_version = ceph.resolve_ceph_version(c.previous('source') or 'distro')
    log('old_version: {}'.format(old_version))
    new_version = ceph.resolve_ceph_version(hookenv.config('source') or
                                            'distro')
    log('new_version: {}'.format(new_version))

    old_version_os = get_os_codename_install_source(c.previous('source') or
                                                    'distro')
    new_version_os = get_os_codename_install_source(hookenv.config('source'))

    # We may be resuming a previous upgrade that failed if the directories
    # still need an ownership update. Check this condition.
    resuming_upgrade = ceph.dirs_need_ownership_update('osd')

    if (ceph.UPGRADE_PATHS.get(old_version) == new_version) or \
            resuming_upgrade:
        if old_version == new_version:
            log('Attempting to resume possibly failed upgrade.', INFO)
        else:
            log("{} to {} is a valid upgrade path. Proceeding.".format(
                old_version, new_version))

        emit_cephconf(upgrading=True)
        ceph.roll_osd_cluster(new_version=new_version,
                              upgrade_key='osd-upgrade')
        emit_cephconf(upgrading=False)

    elif (old_version == new_version and
          old_version_os < new_version_os):
        # See LP: #1778823
        add_source(hookenv.config('source'), hookenv.config('key'))
        log(("The installation source has changed yet there is no new major "
             "version of Ceph in this new source. As a result no package "
             "upgrade will take effect. Please upgrade manually if you need "
             "to."), level=INFO)
    else:
        # Log a helpful error message
        log("Invalid upgrade path from {} to {}. "
            "Valid paths are: {}".format(old_version, new_version,
                                         ceph.pretty_print_upgrade_paths()),
            level=ERROR)
def upgrade_available():
    """Check for upgrade for ceph

    :returns: whether an upgrade is available
    :rtype: boolean
    """
    c = config()
    old_version = ceph_utils.resolve_ceph_version(c.previous('source') or
                                                  'distro')
    new_version = ceph_utils.resolve_ceph_version(c.get('source'))
    if (old_version in ceph_utils.UPGRADE_PATHS and
            new_version == ceph_utils.UPGRADE_PATHS[old_version]):
        return True
    return False
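# Illustrative sketch (not from the charm source): upgrade_available() only
# returns True when the newly configured release is the direct successor of
# the previously configured one in UPGRADE_PATHS. Assuming UPGRADE_PATHS is a
# plain mapping of release name to its immediate successor, the check can be
# exercised in isolation as below; the release names and helper name are
# examples for illustration, not a claim about the actual contents of
# charms.ceph.
UPGRADE_PATHS_EXAMPLE = {
    'luminous': 'mimic',
    'mimic': 'nautilus',
}


def _is_direct_upgrade(old_version, new_version,
                       paths=UPGRADE_PATHS_EXAMPLE):
    # Mirrors the membership-plus-successor test used in upgrade_available().
    return old_version in paths and paths[old_version] == new_version


assert _is_direct_upgrade('luminous', 'mimic')         # adjacent releases
assert not _is_direct_upgrade('luminous', 'nautilus')  # skips a release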
def check_for_upgrade():
    if not ceph.is_bootstrapped():
        log("Ceph is not bootstrapped, skipping upgrade checks.")
        return

    c = hookenv.config()
    old_version = ceph.resolve_ceph_version(c.previous('source') or 'distro')
    log('old_version: {}'.format(old_version))
    new_version = ceph.resolve_ceph_version(hookenv.config('source') or
                                            'distro')
    log('new_version: {}'.format(new_version))

    # We may be resuming a previous upgrade that failed if the directories
    # still need an ownership update. Check this condition.
    resuming_upgrade = ceph.dirs_need_ownership_update('osd')

    if old_version == new_version and not resuming_upgrade:
        log("No new ceph version detected, skipping upgrade.", DEBUG)
        return

    if (ceph.UPGRADE_PATHS.get(old_version) == new_version) or \
            resuming_upgrade:
        if old_version == new_version:
            log('Attempting to resume possibly failed upgrade.', INFO)
        else:
            log("{} to {} is a valid upgrade path. Proceeding.".format(
                old_version, new_version))

        emit_cephconf(upgrading=True)
        ceph.roll_osd_cluster(new_version=new_version,
                              upgrade_key='osd-upgrade')
        emit_cephconf(upgrading=False)
    else:
        # Log a helpful error message
        log("Invalid upgrade path from {} to {}. "
            "Valid paths are: {}".format(old_version, new_version,
                                         ceph.pretty_print_upgrade_paths()))
def check_for_upgrade():
    if not ceph.is_bootstrapped():
        log("Ceph is not bootstrapped, skipping upgrade checks.")
        return

    c = hookenv.config()
    old_version = ceph.resolve_ceph_version(c.previous('source') or 'distro')
    log('old_version: {}'.format(old_version))
    new_version = ceph.resolve_ceph_version(hookenv.config('source'))
    log('new_version: {}'.format(new_version))

    if (old_version in ceph.UPGRADE_PATHS and
            new_version == ceph.UPGRADE_PATHS[old_version]):
        log("{} to {} is a valid upgrade path. Proceeding.".format(
            old_version, new_version))
        ceph.roll_monitor_cluster(new_version=new_version,
                                  upgrade_key='admin')
    else:
        # Log a helpful error message
        log("Invalid upgrade path from {} to {}. "
            "Valid paths are: {}".format(old_version, new_version,
                                         ceph.pretty_print_upgrade_paths()))
def check_for_upgrade():
    if not ceph.is_bootstrapped():
        log("Ceph is not bootstrapped, skipping upgrade checks.")
        return

    c = hookenv.config()
    old_version = ceph.resolve_ceph_version(c.previous('source') or 'distro')
    log('old_version: {}'.format(old_version))
    new_version = ceph.resolve_ceph_version(hookenv.config('source'))
    log('new_version: {}'.format(new_version))

    old_version_os = get_os_codename_install_source(
        c.previous('source') or 'distro')
    new_version_os = get_os_codename_install_source(hookenv.config('source'))

    if (old_version in ceph.UPGRADE_PATHS and
            new_version == ceph.UPGRADE_PATHS[old_version]):
        log("{} to {} is a valid upgrade path. Proceeding.".format(
            old_version, new_version))
        ceph.roll_monitor_cluster(new_version=new_version,
                                  upgrade_key='admin')
    elif (old_version == new_version and
          old_version_os < new_version_os):
        # See LP: #1778823
        add_source(hookenv.config('source'), hookenv.config('key'))
        log(("The installation source has changed yet there is no new major "
             "version of Ceph in this new source. As a result no package "
             "upgrade will take effect. Please upgrade manually if you need "
             "to."), level=INFO)
    else:
        # Log a helpful error message
        log("Invalid upgrade path from {} to {}. "
            "Valid paths are: {}".format(old_version, new_version,
                                         ceph.pretty_print_upgrade_paths()),
            level=ERROR)
def prepare_disks_and_activate():
    # NOTE: vault/vaultlocker preflight check
    vault_kv = vaultlocker.VaultKVContext(vaultlocker.VAULTLOCKER_BACKEND)
    context = vault_kv()
    if use_vaultlocker() and not vault_kv.complete:
        log('Deferring OSD preparation as vault not ready', level=DEBUG)
        return
    elif use_vaultlocker() and vault_kv.complete:
        log('Vault ready, writing vaultlocker configuration', level=DEBUG)
        vaultlocker.write_vaultlocker_conf(context)

    osd_journal = get_journal_devices()
    if not osd_journal.isdisjoint(set(get_devices())):
        raise ValueError('`osd-journal` and `osd-devices` options must not '
                         'overlap.')
    log("got journal devs: {}".format(osd_journal), level=DEBUG)

    # pre-flight check of eligible device pristinity
    devices = get_devices()

    # if a device has been previously touched we need to consider it as
    # non-pristine. If it needs to be re-processed it has to be zapped
    # via the respective action which also clears the unitdata entry.
    db = kv()
    touched_devices = db.get('osd-devices', [])
    devices = [dev for dev in devices if dev not in touched_devices]
    log('Skipping osd devices previously processed by this unit: {}'
        .format(touched_devices))

    # filter osd-devices that are file system paths
    devices = [dev for dev in devices if dev.startswith('/dev')]

    # filter osd-devices that do not exist on this unit
    devices = [dev for dev in devices if os.path.exists(dev)]

    # filter osd-devices that are already mounted
    devices = [dev for dev in devices if not is_device_mounted(dev)]

    # filter osd-devices that are active bluestore devices
    devices = [dev for dev in devices
               if not ceph.is_active_bluestore_device(dev)]

    log('Checking for pristine devices: "{}"'.format(devices), level=DEBUG)
    if not all(ceph.is_pristine_disk(dev) for dev in devices):
        status_set('blocked',
                   'Non-pristine devices detected, consult '
                   '`list-disks`, `zap-disk` and `blacklist-*` actions.')
        return

    if is_osd_bootstrap_ready():
        log('ceph bootstrapped, rescanning disks')
        emit_cephconf()
        bluestore = use_bluestore()
        ceph.udevadm_settle()
        for dev in get_devices():
            ceph.osdize(dev, config('osd-format'),
                        osd_journal,
                        config('ignore-device-errors'),
                        config('osd-encrypt'),
                        bluestore,
                        config('osd-encrypt-keymanager'))
            # Make it fast!
            if config('autotune'):
                ceph.tune_dev(dev)
        ceph.start_osds(get_devices())

    # Notify MON cluster as to how many OSDs this unit bootstrapped
    # into the cluster
    for r_id in relation_ids('mon'):
        relation_set(
            relation_id=r_id,
            relation_settings={
                'bootstrapped-osds': len(db.get('osd-devices', [])),
                'ceph_release': ceph.resolve_ceph_version(
                    hookenv.config('source') or 'distro'
                )
            }
        )
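# Illustrative sketch (not from the charm source): the eligibility filtering
# in prepare_disks_and_activate() reduces the configured osd-devices to block
# devices that are unprocessed, present, unmounted, and not already active
# bluestore devices, before the pristine-disk check runs. The helper below
# mirrors that filter chain with the charm and ceph helpers replaced by
# injected predicates so the ordering can be exercised in isolation; the
# function name, parameters, and example device paths are assumptions for
# illustration only.
import os


def eligible_osd_devices(configured, touched, is_mounted, is_active_bluestore,
                         path_exists=os.path.exists):
    # Drop devices this unit already processed (tracked in unitdata).
    devices = [dev for dev in configured if dev not in touched]
    # Keep only /dev paths that exist, are unmounted, and are not in use
    # as active bluestore devices.
    devices = [dev for dev in devices if dev.startswith('/dev')]
    devices = [dev for dev in devices if path_exists(dev)]
    devices = [dev for dev in devices if not is_mounted(dev)]
    return [dev for dev in devices if not is_active_bluestore(dev)]


# Example: /dev/vdb was already processed and /dev/vdc is mounted, so only
# /dev/vdd remains a candidate for the pristine-disk check.
candidates = eligible_osd_devices(
    configured=['/dev/vdb', '/dev/vdc', '/dev/vdd'],
    touched=['/dev/vdb'],
    is_mounted=lambda dev: dev == '/dev/vdc',
    is_active_bluestore=lambda dev: False,
    path_exists=lambda dev: True,
)
assert candidates == ['/dev/vdd']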