def check_for_upgrade():
    """Check whether a Ceph upgrade is required and, if so, perform it.

    Compares the Ceph version resolved from the previous ``source`` config
    value against the current one.  If they differ and the transition is a
    valid entry in ``ceph.UPGRADE_PATHS`` — or a prior upgrade left OSD
    directories needing an ownership update (i.e. a failed upgrade to
    resume) — the OSD cluster is rolled to the new version.  Skips early
    when Ceph is not bootstrapped or the distro codename is not trusty.
    """
    if not ceph.is_bootstrapped():
        log("Ceph is not bootstrapped, skipping upgrade checks.")
        return

    release_info = host.lsb_release()
    if release_info['DISTRIB_CODENAME'] != 'trusty':
        log("Invalid upgrade path from {}. Only trusty is currently "
            "supported".format(release_info['DISTRIB_CODENAME']))
        return

    cfg = hookenv.config()
    old_version = ceph.resolve_ceph_version(cfg.previous('source') or
                                            'distro')
    log('old_version: {}'.format(old_version))
    new_version = ceph.resolve_ceph_version(
        hookenv.config('source') or 'distro')
    log('new_version: {}'.format(new_version))

    # May be in a previous upgrade that was failed if the directories
    # still need an ownership update. Check this condition.
    resuming_upgrade = ceph.dirs_need_ownership_update('osd')

    if old_version == new_version and not resuming_upgrade:
        log("No new ceph version detected, skipping upgrade.", DEBUG)
        return

    valid_path = ceph.UPGRADE_PATHS.get(old_version) == new_version
    if valid_path or resuming_upgrade:
        if old_version == new_version:
            log('Attempting to resume possibly failed upgrade.', INFO)
        else:
            log("{} to {} is a valid upgrade path. Proceeding.".format(
                old_version, new_version))

        # Regenerate ceph.conf in "upgrading" mode for the duration of the
        # roll, then restore the normal config afterwards.
        emit_cephconf(upgrading=True)
        ceph.roll_osd_cluster(new_version=new_version,
                              upgrade_key='osd-upgrade')
        emit_cephconf(upgrading=False)
    else:
        # Log a helpful error message
        log("Invalid upgrade path from {} to {}. "
            "Valid paths are: {}".format(old_version, new_version,
                                         ceph.pretty_print_upgrade_paths()))
def test_roll_osd_cluster_second(self, wait_on_previous_node,
                                 get_upgrade_position, lock_and_roll,
                                 status_set, socket, get_osd_tree):
    """A non-first OSD blocks on its predecessor before upgrading.

    This host ("ip-192-168-1-3") sits at upgrade position 1 in a two-node
    crush tree, so it must set a blocked status while waiting on
    "ip-192-168-1-2" and only then lock and roll itself.
    """
    wait_on_previous_node.return_value = None
    socket.gethostname.return_value = "ip-192-168-1-3"
    # Two hosts in the same rack/row/datacenter; only the name differs in
    # a way that matters for upgrade ordering.
    shared = dict(identifier='a', rack='rack-a', row='row-a',
                  datacenter='dc-1', chassis='chassis-a', root='ceph')
    get_osd_tree.return_value = [
        CrushLocation(name="ip-192-168-1-2", host='host-a', **shared),
        CrushLocation(name="ip-192-168-1-3", host='host-b', **shared),
    ]
    get_upgrade_position.return_value = 1

    ceph.roll_osd_cluster(new_version='0.94.1',
                          upgrade_key='osd-upgrade')

    status_set.assert_called_with(
        'blocked', 'Waiting on ip-192-168-1-2 to finish upgrading')
    lock_and_roll.assert_called_with(my_name='ip-192-168-1-3',
                                     service='osd',
                                     upgrade_key='osd-upgrade',
                                     version='0.94.1')
def test_roll_osd_cluster_first(self, get_upgrade_position, lock_and_roll,
                                log, get_osd_tree, socket):
    """The OSD at upgrade position 0 rolls immediately, without waiting.

    With an empty osd tree and position 0, roll_osd_cluster should log its
    progress and go straight to lock_and_roll for this host.
    """
    socket.gethostname.return_value = "ip-192-168-1-2"
    get_osd_tree.return_value = ""
    get_upgrade_position.return_value = 0

    ceph.roll_osd_cluster(new_version='0.94.1',
                          upgrade_key='osd-upgrade')

    expected_logs = [
        call('roll_osd_cluster called with 0.94.1'),
        call('osd_sorted_list: []'),
        call('upgrade position: 0'),
    ]
    log.assert_has_calls(expected_logs)
    lock_and_roll.assert_called_with(my_name="ip-192-168-1-2",
                                     version="0.94.1",
                                     upgrade_key='osd-upgrade',
                                     service='osd')