def upgrade_ha_one_controller(self):
    """Upgrade ha one controller deployed cluster with ceph

    Scenario:
        1. Revert snapshot with ha one controller ceph env
        2. Run upgrade on master
        3. Check that upgrade was successful
        4. Run network verification
        5. Run OSTF
        6. Add another compute node
        7. Re-deploy cluster
        8. Run OSTF

    """
    if not self.env.d_env.has_snapshot('ceph_ha_one_controller_compact'):
        raise SkipTest()
    self.env.revert_snapshot('ceph_ha_one_controller_compact')

    cluster_id = self.fuel_web.get_last_created_cluster()

    _ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
    with self.env.d_env.get_ssh_to_remote(_ip) as remote:
        expected_kernel = self.get_slave_kernel(remote)

    self.env.admin_actions.upgrade_master_node()

    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    self.fuel_web.wait_nodes_get_online_state(
        self.env.d_env.nodes().slaves[:3])
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
    self.fuel_web.assert_nailgun_upgrade_migration()
    self.fuel_web.verify_network(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id,
                           test_sets=['ha', 'smoke', 'sanity'])
    self.env.bootstrap_nodes(
        self.env.d_env.nodes().slaves[3:4])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-04': ['compute']},
        True, False
    )
    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id,
                           test_sets=['ha', 'smoke', 'sanity'])
    if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
        _ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']
        with self.env.d_env.get_ssh_to_remote(_ip) as remote:
            kernel = self.get_slave_kernel(remote)
        checkers.check_kernel(kernel, expected_kernel)
    create_diagnostic_snapshot(
        self.env, "pass", "upgrade_ha_one_controller")

    self.env.make_snapshot("upgrade_ha_one_controller")
def rollback_automatically_ha_one_controller(self):
    """Rollback automatically ha one controller deployed cluster

    Scenario:
        1. Revert snapshot with ha one controller ceph env
        2. Add raise exception to docker_engine.py file
        3. Run upgrade on master
        4. Check that rollback starts automatically
        5. Check that cluster was not upgraded
        6. Run network verification
        7. Run OSTF
        8. Add 1 ceph node and re-deploy cluster
        9. Run OSTF

    """
    if not self.env.d_env.has_snapshot('ceph_ha_one_controller_compact'):
        raise SkipTest()
    self.env.revert_snapshot('ceph_ha_one_controller_compact')

    cluster_id = self.fuel_web.get_last_created_cluster()

    _ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
    with self.env.d_env.get_ssh_to_remote(_ip) as remote:
        expected_kernel = UpgradeFuelMaster.get_slave_kernel(remote)

    self.env.admin_actions.upgrade_master_node(rollback=True)

    _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
        self.env.d_env.nodes().slaves[0]), timeout=8 * 60)
    logger.debug("all services are up now")
    self.fuel_web.wait_nodes_get_online_state(
        self.env.d_env.nodes().slaves[:3])
    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
    self.fuel_web.verify_network(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id,
                           test_sets=['ha', 'smoke', 'sanity'])
    self.env.bootstrap_nodes(
        self.env.d_env.nodes().slaves[3:4])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-04': ['ceph-osd']},
        True, False
    )
    self.fuel_web.deploy_cluster_wait(cluster_id)
    if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
        _ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']
        with self.env.d_env.get_ssh_to_remote(_ip) as remote:
            kernel = UpgradeFuelMaster.get_slave_kernel(remote)
        checkers.check_kernel(kernel, expected_kernel)
    self.fuel_web.run_ostf(cluster_id=cluster_id,
                           test_sets=['ha', 'smoke', 'sanity'])

    self.env.make_snapshot("rollback_automatically_ha_one_controller")
@classmethod
def check_upgraded_kernel(cls, admin_remote, slave_remote):
    # The upgrade archive contains several versions of the kernel;
    # the regular expression picks the newest one, which is the
    # version that actually gets installed.
    cmd = r"find /var/upgrade/repos/*/ubuntu/ -type f -name" \
          r" 'linux-image-*.deb' -printf '%f\n' | sed -rn " \
          r"'s/^linux-image-([0-9, \.]+(\-[0-9]+)?)-.*/\1/p' |" \
          r" sort -rV | " \
          r"head -1"
    expected_kernel = ''.join(admin_remote.execute(
        cmd)['stdout']).rstrip()
    logger.debug(
        "kernel version from repos is {0}".format(expected_kernel))
    kernel = UpgradeFuelMaster.get_slave_kernel(slave_remote)
    checkers.check_kernel(kernel, expected_kernel)
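# `get_slave_kernel` is called throughout these tests but is not defined
# in this section. Below is a minimal sketch of the assumed helper: it
# reads `uname -r` over SSH and keeps only the numeric version prefix,
# mirroring the regex used in check_upgraded_kernel above. The actual
# implementation in the test suite may differ.
@staticmethod
def get_slave_kernel(slave_remote):
    # e.g. '3.13.0-24-generic' -> '3.13.0-24'
    kernel = ''.join(slave_remote.execute(
        r"uname -r | sed -rn"
        r" 's/^([0-9, \.]+(\-[0-9]+)?)-.*/\1/p'")['stdout']).rstrip()
    logger.debug("slave kernel is {0}".format(kernel))
    return kernel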
def upgrade_ha_one_controller_env(self):
    """Upgrade ha one controller deployed cluster with ceph

    Scenario:
        1. Revert snapshot with ha one controller ceph env
        2. Run upgrade on master
        3. Check that upgrade was successful
        4. Run network verification
        5. Run OSTF
        6. Add another compute node
        7. Re-deploy cluster
        8. Run OSTF

    """
    # For upgrade jobs *from* 6.1, change snapshot name to
    # "ceph_ha_one_controller_compact"
    if not self.env.d_env.has_snapshot('ceph_multinode_compact'):
        raise SkipTest()
    self.env.revert_snapshot("ceph_multinode_compact")

    cluster_id = self.fuel_web.get_last_created_cluster()

    _ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
    remote = self.env.d_env.get_ssh_to_remote(_ip)
    expected_kernel = self.get_slave_kernel(remote)

    checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                            hlp_data.TARBALL_PATH, '/var')
    checkers.check_tarball_exists(self.env.d_env.get_admin_remote(),
                                  os.path.basename(hlp_data.TARBALL_PATH),
                                  '/var')
    checkers.untar(self.env.d_env.get_admin_remote(),
                   os.path.basename(hlp_data.TARBALL_PATH), '/var')
    checkers.run_script(self.env.d_env.get_admin_remote(), '/var',
                        'upgrade.sh',
                        password=hlp_data.KEYSTONE_CREDS['password'])
    checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
                                  phrase='*** UPGRADING MASTER NODE'
                                         ' DONE SUCCESSFULLY')
    checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                       hlp_data.UPGRADE_FUEL_FROM,
                                       hlp_data.UPGRADE_FUEL_TO)
    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    self.fuel_web.wait_nodes_get_online_state(
        self.env.d_env.nodes().slaves[:3])
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
    self.fuel_web.assert_nailgun_upgrade_migration()
    self.fuel_web.verify_network(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id)
    self.env.bootstrap_nodes(
        self.env.d_env.nodes().slaves[3:4])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-04': ['compute']},
        True, False
    )
    self.fuel_web.deploy_cluster_wait(cluster_id)
    os_conn = os_actions.OpenStackActions(
        self.fuel_web.get_public_vip(cluster_id),
        user='******', tenant='ceph1', passwd='ceph1')
    self.fuel_web.assert_cluster_ready(
        os_conn,
        smiles_count=7 if hlp_data.NEUTRON_ENABLE else 10,
        networks_count=2 if hlp_data.NEUTRON_ENABLE else 1,
        timeout=300)
    self.fuel_web.run_ostf(cluster_id=cluster_id)
    if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
        _ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']
        remote = self.env.d_env.get_ssh_to_remote(_ip)
        kernel = self.get_slave_kernel(remote)
        checkers.check_kernel(kernel, expected_kernel)
    create_diagnostic_snapshot(
        self.env, "pass", "upgrade_ha_one_controller_env")

    self.env.make_snapshot("upgrade_ha_one_controller")
def rollback_automatically_ha_one_controller_env(self):
    """Rollback automatically ha one controller deployed cluster

    Scenario:
        1. Revert snapshot with ha one controller ceph env
        2. Add raise exception to docker_engine.py file
        3. Run upgrade on master
        4. Check that rollback starts automatically
        5. Check that cluster was not upgraded
        6. Run network verification
        7. Run OSTF
        8. Add 1 ceph node and re-deploy cluster
        9. Run OSTF

    """
    if not self.env.d_env.has_snapshot('ceph_multinode_compact'):
        raise SkipTest()
    self.env.revert_snapshot("ceph_multinode_compact")

    cluster_id = self.fuel_web.get_last_created_cluster()

    _ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
    remote = self.env.d_env.get_ssh_to_remote(_ip)
    expected_kernel = UpgradeFuelMaster.get_slave_kernel(remote)

    checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                            hlp_data.TARBALL_PATH, '/var')
    checkers.check_tarball_exists(self.env.d_env.get_admin_remote(),
                                  os.path.basename(hlp_data.TARBALL_PATH),
                                  '/var')
    checkers.untar(self.env.d_env.get_admin_remote(),
                   os.path.basename(hlp_data.TARBALL_PATH), '/var')
    # The upgrade is expected to fail and roll back, in which case
    # upgrade.sh exits with code 255.
    checkers.run_script(self.env.d_env.get_admin_remote(), '/var',
                        'upgrade.sh',
                        password=hlp_data.KEYSTONE_CREDS['password'],
                        rollback=True, exit_code=255)
    checkers.wait_rollback_is_done(self.env.d_env.get_admin_remote(), 3000)
    checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                       hlp_data.UPGRADE_FUEL_TO,
                                       hlp_data.UPGRADE_FUEL_FROM)
    logger.debug("all containers are ok")
    _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
        self.env.d_env.nodes().slaves[0]), timeout=8 * 60)
    logger.debug("all services are up now")
    self.fuel_web.wait_nodes_get_online_state(
        self.env.d_env.nodes().slaves[:3])
    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
    self.fuel_web.verify_network(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id)
    self.env.bootstrap_nodes(
        self.env.d_env.nodes().slaves[3:4])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-04': ['ceph-osd']},
        True, False
    )
    self.fuel_web.deploy_cluster_wait(cluster_id)
    if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
        _ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']
        remote = self.env.d_env.get_ssh_to_remote(_ip)
        kernel = UpgradeFuelMaster.get_slave_kernel(remote)
        checkers.check_kernel(kernel, expected_kernel)
    self.fuel_web.run_ostf(cluster_id=cluster_id)

    self.env.make_snapshot("rollback_automatic_ha_one_controller")
def upgrade_simple_env(self):
    """Upgrade simple deployed cluster with ceph

    Scenario:
        1. Revert snapshot with simple ceph env
        2. Run upgrade on master
        3. Check that upgrade was successful
        4. Add another compute node
        5. Re-deploy cluster
        6. Run OSTF

    """
    if not self.env.get_virtual_environment().has_snapshot(
            'ceph_multinode_compact'):
        raise SkipTest()
    self.env.revert_snapshot("ceph_multinode_compact")

    cluster_id = self.fuel_web.get_last_created_cluster()

    remote = self.env.get_ssh_to_remote_by_name('slave-01')
    expected_kernel = self.get_slave_kernel(remote)

    checkers.upload_tarball(self.env.get_admin_remote(),
                            hlp_data.TARBALL_PATH, '/var')
    checkers.check_tarball_exists(self.env.get_admin_remote(),
                                  os.path.basename(hlp_data.TARBALL_PATH),
                                  '/var')
    checkers.untar(self.env.get_admin_remote(),
                   os.path.basename(hlp_data.TARBALL_PATH), '/var')
    checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh',
                        password=hlp_data.KEYSTONE_CREDS['password'])
    checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 3000,
                                  phrase='*** UPGRADE DONE SUCCESSFULLY')
    checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                       hlp_data.UPGRADE_FUEL_FROM,
                                       hlp_data.UPGRADE_FUEL_TO)
    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:3])
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
    self.fuel_web.assert_nailgun_upgrade_migration()
    self.env.bootstrap_nodes(self.env.nodes().slaves[3:4])
    self.fuel_web.update_nodes(cluster_id,
                               {'slave-04': ['compute']},
                               True, False)
    self.fuel_web.deploy_cluster_wait(cluster_id)
    os_conn = os_actions.OpenStackActions(
        self.fuel_web.get_nailgun_node_by_name('slave-01')['ip'],
        user='******', tenant='ceph1', passwd='ceph1')
    self.fuel_web.assert_cluster_ready(os_conn,
                                       smiles_count=10,
                                       networks_count=1,
                                       timeout=300)
    self.fuel_web.run_ostf(cluster_id=cluster_id)
    if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
        remote = self.env.get_ssh_to_remote_by_name('slave-04')
        kernel = self.get_slave_kernel(remote)
        checkers.check_kernel(kernel, expected_kernel)
    create_diagnostic_snapshot(self.env, "pass", "upgrade_simple_env")

    self.env.make_snapshot("upgrade_simple")
def rollback_automatically_simple_env(self):
    """Rollback automatically simple deployed cluster

    Scenario:
        1. Revert snapshot with simple neutron gre env
        2. Add raise exception to docker_engine.py file
        3. Run upgrade on master
        4. Check that rollback starts automatically
        5. Check that cluster was not upgraded and run OSTF
        6. Add 1 cinder node and re-deploy cluster
        7. Run OSTF

    """
    if not self.env.get_virtual_environment().has_snapshot(
            'deploy_neutron_gre'):
        raise SkipTest()
    self.env.revert_snapshot("deploy_neutron_gre")

    cluster_id = self.fuel_web.get_last_created_cluster()

    remote = self.env.get_ssh_to_remote_by_name('slave-01')
    expected_kernel = UpgradeFuelMaster.get_slave_kernel(remote)

    checkers.upload_tarball(self.env.get_admin_remote(),
                            hlp_data.TARBALL_PATH, '/var')
    checkers.check_tarball_exists(self.env.get_admin_remote(),
                                  os.path.basename(hlp_data.TARBALL_PATH),
                                  '/var')
    checkers.untar(self.env.get_admin_remote(),
                   os.path.basename(hlp_data.TARBALL_PATH), '/var')
    # The upgrade is expected to fail and roll back, in which case
    # upgrade.sh exits with code 255.
    checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh',
                        password=hlp_data.KEYSTONE_CREDS['password'],
                        rollback=True, exit_code=255)
    checkers.wait_rollback_is_done(self.env.get_admin_remote(), 3000)
    checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                       hlp_data.UPGRADE_FUEL_TO,
                                       hlp_data.UPGRADE_FUEL_FROM)
    logger.debug("all containers are ok")
    _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
        self.env.nodes().slaves[0]), timeout=120)
    logger.debug("all services are up now")
    self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:3])
    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
    self.fuel_web.run_ostf(cluster_id=cluster_id)
    self.env.bootstrap_nodes(self.env.nodes().slaves[3:4])
    self.fuel_web.update_nodes(cluster_id,
                               {'slave-04': ['cinder']},
                               True, False)
    self.fuel_web.deploy_cluster_wait(cluster_id)
    if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
        remote = self.env.get_ssh_to_remote_by_name('slave-04')
        kernel = UpgradeFuelMaster.get_slave_kernel(remote)
        checkers.check_kernel(kernel, expected_kernel)
    self.fuel_web.run_ostf(cluster_id=cluster_id)

    self.env.make_snapshot("rollback_automatic_simple")
def upgrade_ha_one_controller(self):
    """Upgrade ha one controller deployed cluster with ceph

    Scenario:
        1. Revert snapshot with ha one controller ceph env
        2. Run upgrade on master
        3. Check that upgrade was successful
        4. Run network verification
        5. Run OSTF
        6. Add another compute node
        7. Re-deploy cluster
        8. Run OSTF

    """
    if not self.env.d_env.has_snapshot('ceph_ha_one_controller_compact'):
        raise SkipTest()
    self.env.revert_snapshot('ceph_ha_one_controller_compact')

    cluster_id = self.fuel_web.get_last_created_cluster()

    _ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
    remote = self.env.d_env.get_ssh_to_remote(_ip)
    expected_kernel = self.get_slave_kernel(remote)

    checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                            hlp_data.TARBALL_PATH, '/var')
    checkers.check_file_exists(self.env.d_env.get_admin_remote(),
                               os.path.join('/var', os.path.basename(
                                   hlp_data.TARBALL_PATH)))
    checkers.untar(self.env.d_env.get_admin_remote(),
                   os.path.basename(hlp_data.TARBALL_PATH), '/var')
    checkers.run_script(self.env.d_env.get_admin_remote(), '/var',
                        'upgrade.sh',
                        password=hlp_data.KEYSTONE_CREDS['password'])
    checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
                                  phrase='*** UPGRADING MASTER NODE'
                                         ' DONE SUCCESSFULLY')
    checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                       hlp_data.UPGRADE_FUEL_FROM,
                                       hlp_data.UPGRADE_FUEL_TO)
    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    self.fuel_web.wait_nodes_get_online_state(
        self.env.d_env.nodes().slaves[:3])
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
    self.fuel_web.assert_nailgun_upgrade_migration()
    self.fuel_web.verify_network(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id,
                           test_sets=['ha', 'smoke', 'sanity'])
    self.env.bootstrap_nodes(
        self.env.d_env.nodes().slaves[3:4])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-04': ['compute']},
        True, False
    )
    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id,
                           test_sets=['ha', 'smoke', 'sanity'])
    if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
        _ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']
        remote = self.env.d_env.get_ssh_to_remote(_ip)
        kernel = self.get_slave_kernel(remote)
        checkers.check_kernel(kernel, expected_kernel)
    create_diagnostic_snapshot(
        self.env, "pass", "upgrade_ha_one_controller")

    self.env.make_snapshot("upgrade_ha_one_controller")
def upgrade_simple_env(self):
    """Upgrade simple deployed cluster with ceph

    Scenario:
        1. Revert snapshot with simple ceph env
        2. Run upgrade on master
        3. Check that upgrade was successful
        4. Add another compute node
        5. Re-deploy cluster
        6. Run OSTF

    """
    if not self.env.get_virtual_environment().has_snapshot(
            'ceph_multinode_compact'):
        raise SkipTest()
    self.env.revert_snapshot("ceph_multinode_compact")

    cluster_id = self.fuel_web.get_last_created_cluster()

    remote = self.env.get_ssh_to_remote_by_name('slave-01')
    expected_kernel = self.get_slave_kernel(remote)

    checkers.upload_tarball(self.env.get_admin_remote(),
                            hlp_data.TARBALL_PATH, '/var')
    checkers.check_tarball_exists(self.env.get_admin_remote(),
                                  os.path.basename(hlp_data.TARBALL_PATH),
                                  '/var')
    checkers.untar(self.env.get_admin_remote(),
                   os.path.basename(hlp_data.TARBALL_PATH), '/var')
    checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh',
                        password=hlp_data.KEYSTONE_CREDS['password'])
    checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 3000,
                                  phrase='*** UPGRADE DONE SUCCESSFULLY')
    checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                       hlp_data.UPGRADE_FUEL_FROM,
                                       hlp_data.UPGRADE_FUEL_TO)
    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
    self.fuel_web.assert_nailgun_upgrade_migration()
    self.env.bootstrap_nodes(self.env.nodes().slaves[3:4])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-04': ['compute']},
        True, False
    )
    self.fuel_web.deploy_cluster_wait(cluster_id)
    os_conn = os_actions.OpenStackActions(
        self.fuel_web.get_nailgun_node_by_name('slave-01')['ip'],
        user='******', tenant='ceph1', passwd='ceph1')
    self.fuel_web.assert_cluster_ready(
        os_conn, smiles_count=10, networks_count=1, timeout=300)
    self.fuel_web.run_ostf(cluster_id=cluster_id)
    if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
        remote = self.env.get_ssh_to_remote_by_name('slave-04')
        kernel = self.get_slave_kernel(remote)
        checkers.check_kernel(kernel, expected_kernel)
    create_diagnostic_snapshot(self.env, "pass", "upgrade_simple_env")

    self.env.make_snapshot("upgrade_simple")
def rollback_automatically_simple_env(self):
    """Rollback automatically simple deployed cluster

    Scenario:
        1. Revert snapshot with simple neutron gre env
        2. Corrupt the releases metadata.yaml file so that the
           upgrade fails
        3. Run upgrade on master
        4. Check that rollback starts automatically
        5. Check that cluster was not upgraded and run OSTF
        6. Add 1 cinder node and re-deploy cluster
        7. Run OSTF

    """
    if not self.env.get_virtual_environment().has_snapshot(
            'deploy_neutron_gre'):
        raise SkipTest()
    self.env.revert_snapshot("deploy_neutron_gre")

    cluster_id = self.fuel_web.get_last_created_cluster()

    remote = self.env.get_ssh_to_remote_by_name('slave-01')
    expected_kernel = UpgradeFuelMaster.get_slave_kernel(remote)

    checkers.upload_tarball(self.env.get_admin_remote(),
                            hlp_data.TARBALL_PATH, '/var')
    checkers.check_tarball_exists(self.env.get_admin_remote(),
                                  os.path.basename(hlp_data.TARBALL_PATH),
                                  '/var')
    checkers.untar(self.env.get_admin_remote(),
                   os.path.basename(hlp_data.TARBALL_PATH), '/var')
    # Break the release metadata so that the upgrade fails and the
    # automatic rollback is triggered.
    self.fuel_web.modify_python_file(self.env.get_admin_remote(),
                                     "2i \ \ \ \ 2014.2-6.0: blah-blah",
                                     '/var/upgrade/releases/'
                                     'metadata.yaml')
    # The upgrade is expected to fail, so upgrade.sh exits with code 255.
    checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh',
                        password=hlp_data.KEYSTONE_CREDS['password'],
                        rollback=True, exit_code=255)
    checkers.wait_rollback_is_done(self.env.get_admin_remote(), 3000)
    checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                       hlp_data.UPGRADE_FUEL_TO,
                                       hlp_data.UPGRADE_FUEL_FROM)
    logger.debug("all containers are ok")
    _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
        self.env.nodes().slaves[0]), timeout=120)
    logger.debug("all services are up now")
    self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:3])
    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
    self.fuel_web.run_ostf(cluster_id=cluster_id)
    self.env.bootstrap_nodes(self.env.nodes().slaves[3:4])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-04': ['cinder']},
        True, False
    )
    self.fuel_web.deploy_cluster_wait(cluster_id)
    if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
        remote = self.env.get_ssh_to_remote_by_name('slave-04')
        kernel = UpgradeFuelMaster.get_slave_kernel(remote)
        checkers.check_kernel(kernel, expected_kernel)
    self.fuel_web.run_ostf(cluster_id=cluster_id)

    self.env.make_snapshot("rollback_automatic_simple")
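# `checkers.check_kernel` is used throughout the tests above but is not
# shown in this section. Below is a minimal sketch of the assumed check,
# using proboscis-style asserts as elsewhere in fuel-qa; the real helper
# may differ.
from proboscis.asserts import assert_equal

def check_kernel(kernel, expected_kernel):
    # Fail if the slave's running kernel differs from the newest kernel
    # version shipped in the upgrade repos.
    assert_equal(kernel, expected_kernel,
                 "Kernel version is wrong, it is {0}".format(kernel))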