def upgrade_master_node(self, rollback=False, file_upload=True):
    """This method upgrades master node with current state."""
    # TODO: It will be removed or changed
    master = self.admin_remote
    if file_upload:
        # Deliver and unpack the upgrade tarball on the master node.
        checkers.upload_tarball(master, hlp_data.TARBALL_PATH, '/var')
        checkers.check_file_exists(
            master,
            os.path.join('/var', os.path.basename(hlp_data.TARBALL_PATH)))
        self.untar(master, os.path.basename(hlp_data.TARBALL_PATH), '/var')
    keystone_pass = hlp_data.KEYSTONE_CREDS['password']
    # On rollback runs upgrade.sh is expected to fail with exit code 255.
    checkers.run_upgrade_script(master, '/var', 'upgrade.sh',
                                password=keystone_pass,
                                rollback=rollback,
                                exit_code=255 if rollback else 0)
    if not rollback:
        checkers.wait_upgrade_is_done(master, 3000,
                                      phrase='***UPGRADING MASTER NODE'
                                             ' DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(master,
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
    else:
        checkers.wait_rollback_is_done(master, 3000)
        # After rollback the containers must match the FROM version again.
        checkers.check_upgraded_containers(master,
                                           hlp_data.UPGRADE_FUEL_TO,
                                           hlp_data.UPGRADE_FUEL_FROM)
    logger.debug("all containers are ok")

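# A minimal usage sketch for the helper above (hypothetical test method,
# not part of the suite): it drives the plain upgrade first, then exercises
# the rollback path, skipping the re-upload since the tarball is already
# unpacked under /var.
def upgrade_then_rollback_sketch(self):
    # Happy path: upload + untar the tarball, upgrade.sh must exit 0.
    self.upgrade_master_node()
    # Failure path: upgrade.sh is expected to exit 255 and roll back.
    self.upgrade_master_node(rollback=True, file_upload=False)
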
def rollback_automatically_ha_env(self):
    """Rollback automatically HA deployed cluster

    Scenario:
        1. Revert snapshot with simple neutron gre ha env
        2. Add raise exception to openstack.py file
        3. Run upgrade on master
        4. Check that rollback starts automatically
        5. Check that cluster was not upgraded
        6. Add 1 cinder node and re-deploy cluster
        7. Run OSTF

    """
    if not self.env.get_virtual_environment().has_snapshot(
            'deploy_neutron_gre_ha'):
        raise SkipTest()

    self.env.revert_snapshot("deploy_neutron_gre_ha")
    cluster_id = self.fuel_web.get_last_created_cluster()
    checkers.upload_tarball(self.env.get_admin_remote(),
                            hlp_data.TARBALL_PATH, '/var')
    checkers.check_tarball_exists(self.env.get_admin_remote(),
                                  os.path.basename(hlp_data.TARBALL_PATH),
                                  '/var')
    checkers.untar(self.env.get_admin_remote(),
                   os.path.basename(hlp_data.TARBALL_PATH), '/var')
    # The "61i ..." argument is a sed insert command: it adds the raise
    # before line 61 of openstack.py so the upgrade fails and rolls back.
    self.fuel_web.modify_python_file(self.env.get_admin_remote(),
                                     "61i \ \ \ \ \ \ \ \ raise errors."
                                     "ExecutedErrorNonZeroExitCode('{0}')"
                                     .format('Some bad error'),
                                     '/var/upgrade/site-packages/'
                                     'fuel_upgrade/engines/'
                                     'openstack.py')
    checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh',
                        password=hlp_data.KEYSTONE_CREDS['password'],
                        rollback=True, exit_code=255)
    checkers.wait_rollback_is_done(self.env.get_admin_remote(), 3000)
    checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                       hlp_data.UPGRADE_FUEL_TO,
                                       hlp_data.UPGRADE_FUEL_FROM)
    logger.debug("all containers are ok")
    _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
        self.env.nodes().slaves[0]), timeout=120)
    logger.debug("all services are up now")
    self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:5])
    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
    self.env.bootstrap_nodes(self.env.nodes().slaves[5:6])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-06': ['cinder']},
        True, False
    )
    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id)
    self.env.make_snapshot("rollback_automatic_ha")

def rollback_automatically_simple_env(self):
    """Rollback automatically simple deployed cluster

    Scenario:
        1. Revert snapshot with simple neutron gre env
        2. Add raise exception to docker_engine.py file
        3. Run upgrade on master
        4. Check that rollback starts automatically
        5. Check that cluster was not upgraded and run OSTF
        6. Add 1 cinder node and re-deploy cluster
        7. Run OSTF

    """
    if not self.env.get_virtual_environment().has_snapshot(
            'deploy_neutron_gre'):
        raise SkipTest()

    self.env.revert_snapshot("deploy_neutron_gre")
    cluster_id = self.fuel_web.get_last_created_cluster()
    checkers.upload_tarball(self.env.get_admin_remote(),
                            hlp_data.TARBALL_PATH, '/var')
    checkers.check_tarball_exists(self.env.get_admin_remote(),
                                  os.path.basename(hlp_data.TARBALL_PATH),
                                  '/var')
    checkers.untar(self.env.get_admin_remote(),
                   os.path.basename(hlp_data.TARBALL_PATH), '/var')
    # The "98i ..." argument is a sed insert command: it adds the raise
    # before line 98 of docker_engine.py so the upgrade fails and rolls back.
    self.fuel_web.modify_python_file(self.env.get_admin_remote(),
                                     "98i \ \ \ \ \ \ \ \ raise errors."
                                     "ExecutedErrorNonZeroExitCode('{0}')"
                                     .format('Some bad error'),
                                     '/var/upgrade/site-packages/'
                                     'fuel_upgrade/engines/'
                                     'docker_engine.py')
    # we expect 255 exit code here because upgrade failed
    # and exit status is 255
    checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh',
                        rollback=True, exit_code=255)
    checkers.wait_rollback_is_done(self.env.get_admin_remote(), 3000)
    checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                       hlp_data.UPGRADE_FUEL_TO,
                                       hlp_data.UPGRADE_FUEL_FROM)
    self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:3])
    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
    # One OSTF test is expected to fail at this point (volume create/attach).
    self.fuel_web.run_ostf(
        cluster_id=cluster_id,
        should_fail=1,
        failed_test_name=['Create volume and attach it to instance'])
    self.env.bootstrap_nodes(self.env.nodes().slaves[3:4])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-04': ['cinder']},
        True, False
    )
    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id)
    self.env.make_snapshot("rollback_automatic_simple")

def rollback_automatically_ha_env(self):
    """Rollback automatically HA deployed cluster

    Scenario:
        1. Revert snapshot with simple neutron gre ha env
        2. Add wrong release entry to releases metadata.yaml file
        3. Run upgrade on master
        4. Check that rollback starts automatically
        5. Check that cluster was not upgraded
        6. Add 1 cinder node and re-deploy cluster
        7. Run OSTF

    """
    if not self.env.get_virtual_environment().has_snapshot(
            'deploy_neutron_gre_ha'):
        raise SkipTest()

    self.env.revert_snapshot("deploy_neutron_gre_ha")
    cluster_id = self.fuel_web.get_last_created_cluster()
    checkers.upload_tarball(self.env.get_admin_remote(),
                            hlp_data.TARBALL_PATH, '/var')
    checkers.check_tarball_exists(self.env.get_admin_remote(),
                                  os.path.basename(hlp_data.TARBALL_PATH),
                                  '/var')
    checkers.untar(self.env.get_admin_remote(),
                   os.path.basename(hlp_data.TARBALL_PATH), '/var')
    # Corrupt the releases metadata ("2i <text>" is a sed insert command:
    # add a bogus entry before line 2) so the upgrade fails and rolls back.
    self.fuel_web.modify_python_file(self.env.get_admin_remote(),
                                     "2i \ \ \ \ 2014.2-6.0: blah-blah",
                                     '/var/upgrade/releases/'
                                     'metadata.yaml')
    checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh',
                        password=hlp_data.KEYSTONE_CREDS['password'],
                        rollback=True, exit_code=255)
    checkers.wait_rollback_is_done(self.env.get_admin_remote(), 3000)
    checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                       hlp_data.UPGRADE_FUEL_TO,
                                       hlp_data.UPGRADE_FUEL_FROM)
    logger.debug("all containers are ok")
    _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
        self.env.nodes().slaves[0]), timeout=120)
    logger.debug("all services are up now")
    self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:5])
    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
    self.env.bootstrap_nodes(self.env.nodes().slaves[5:6])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-06': ['cinder']},
        True, False
    )
    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id)
    self.env.make_snapshot("rollback_automatic_ha")

def rollback_automatically_delete_node(self):
    """Rollback automatically ha one controller deployed cluster
       and delete node from cluster

    Scenario:
        1. Revert snapshot with deploy neutron gre env
        2. Add raise exception to docker_engine.py file
        3. Run upgrade on master
        4. Check that rollback starts automatically
        5. Check that cluster was not upgraded
        6. Run network verification
        7. Run OSTF
        8. Delete 1 node and re-deploy cluster
        9. Run OSTF

    """
    if not self.env.d_env.has_snapshot('deploy_neutron_gre'):
        raise SkipTest()

    self.env.revert_snapshot("deploy_neutron_gre")
    cluster_id = self.fuel_web.get_last_created_cluster()

    checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                            hlp_data.TARBALL_PATH, '/var')
    checkers.check_tarball_exists(self.env.d_env.get_admin_remote(),
                                  os.path.basename(hlp_data.TARBALL_PATH),
                                  '/var')
    checkers.untar(self.env.d_env.get_admin_remote(),
                   os.path.basename(hlp_data.TARBALL_PATH), '/var')
    # we expect 255 exit code here because upgrade failed
    # and exit status is 255
    checkers.run_script(self.env.d_env.get_admin_remote(), '/var',
                        'upgrade.sh',
                        password=hlp_data.KEYSTONE_CREDS['password'],
                        rollback=True, exit_code=255)
    checkers.wait_rollback_is_done(self.env.d_env.get_admin_remote(), 3000)
    checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                       hlp_data.UPGRADE_FUEL_TO,
                                       hlp_data.UPGRADE_FUEL_FROM)
    logger.debug("all containers are ok")
    _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
        self.env.d_env.nodes().slaves[0]), timeout=8 * 60)
    logger.debug("all services are up now")
    self.fuel_web.wait_nodes_get_online_state(
        self.env.d_env.nodes().slaves[:3])
    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
    self.fuel_web.verify_network(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id)
    # Mark slave-03 for deletion and re-deploy the cluster without it.
    nailgun_nodes = self.fuel_web.update_nodes(
        cluster_id, {'slave-03': ['compute', 'cinder']}, False, True)
    task = self.fuel_web.deploy_cluster(cluster_id)
    self.fuel_web.assert_task_success(task)
    nodes = filter(lambda x: x["pending_deletion"] is True, nailgun_nodes)
    try:
        wait(lambda: len(self.fuel_web.client.list_nodes()) == 3,
             timeout=5 * 60)
    except TimeoutError:
        assert_true(len(self.fuel_web.client.list_nodes()) == 3,
                    'Node {0} is not discovered in timeout 5 * 60'.format(
                        nodes[0]))
    self.fuel_web.run_ostf(cluster_id=cluster_id, should_fail=1)

    self.env.make_snapshot("rollback_automatically_delete_node")

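# A minimal helper sketch for the wait-then-assert pattern used above
# (hypothetical name; assumes the same `wait`, `TimeoutError` and
# `assert_true` imports this module already relies on).
def wait_for_node_count(self, expected, timeout=5 * 60):
    try:
        # Poll nailgun until the expected number of nodes is registered.
        wait(lambda: len(self.fuel_web.client.list_nodes()) == expected,
             timeout=timeout)
    except TimeoutError:
        # Re-check once so the failure carries a readable assertion message.
        assert_true(len(self.fuel_web.client.list_nodes()) == expected,
                    'Nodes are not discovered in timeout {0}'.format(timeout))
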
def rollback_automatically_ha_one_controller_env(self):
    """Rollback automatically ha one controller deployed cluster

    Scenario:
        1. Revert snapshot with deploy neutron gre env
        2. Add raise exception to docker_engine.py file
        3. Run upgrade on master
        4. Check that rollback starts automatically
        5. Check that cluster was not upgraded
        6. Run network verification
        7. Run OSTF
        8. Add 1 ceph node and re-deploy cluster
        9. Run OSTF

    """
    if not self.env.d_env.has_snapshot('ceph_multinode_compact'):
        raise SkipTest()

    self.env.revert_snapshot("ceph_multinode_compact")
    cluster_id = self.fuel_web.get_last_created_cluster()

    _ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
    remote = self.env.d_env.get_ssh_to_remote(_ip)
    expected_kernel = UpgradeFuelMaster.get_slave_kernel(remote)

    checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                            hlp_data.TARBALL_PATH, '/var')
    checkers.check_tarball_exists(self.env.d_env.get_admin_remote(),
                                  os.path.basename(hlp_data.TARBALL_PATH),
                                  '/var')
    checkers.untar(self.env.d_env.get_admin_remote(),
                   os.path.basename(hlp_data.TARBALL_PATH), '/var')
    # we expect 255 exit code here because upgrade failed
    # and exit status is 255
    checkers.run_script(self.env.d_env.get_admin_remote(), '/var',
                        'upgrade.sh',
                        password=hlp_data.KEYSTONE_CREDS['password'],
                        rollback=True, exit_code=255)
    checkers.wait_rollback_is_done(self.env.d_env.get_admin_remote(), 3000)
    checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                       hlp_data.UPGRADE_FUEL_TO,
                                       hlp_data.UPGRADE_FUEL_FROM)
    logger.debug("all containers are ok")
    _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
        self.env.d_env.nodes().slaves[0]), timeout=8 * 60)
    logger.debug("all services are up now")
    self.fuel_web.wait_nodes_get_online_state(
        self.env.d_env.nodes().slaves[:3])
    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
    self.fuel_web.verify_network(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id)
    self.env.bootstrap_nodes(
        self.env.d_env.nodes().slaves[3:4])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-04': ['ceph-osd']},
        True, False
    )
    self.fuel_web.deploy_cluster_wait(cluster_id)
    if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
        _ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']
        remote = self.env.d_env.get_ssh_to_remote(_ip)
        kernel = UpgradeFuelMaster.get_slave_kernel(remote)
        checkers.check_kernel(kernel, expected_kernel)
    self.fuel_web.run_ostf(cluster_id=cluster_id)

    self.env.make_snapshot("rollback_automatic_ha_one_controller")

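# A minimal sketch of the kernel-consistency check the scenario above
# performs (hypothetical helper; assumes get_slave_kernel reads the running
# kernel version over ssh and checkers.check_kernel asserts equality).
def assert_kernels_match(self, reference_node, new_node):
    kernels = []
    for name in (reference_node, new_node):
        _ip = self.fuel_web.get_nailgun_node_by_name(name)['ip']
        remote = self.env.d_env.get_ssh_to_remote(_ip)
        kernels.append(UpgradeFuelMaster.get_slave_kernel(remote))
    # A node deployed after rollback must boot the pre-upgrade kernel.
    checkers.check_kernel(kernels[1], kernels[0])
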
def rollback_automatically_simple_env(self):
    """Rollback automatically simple deployed cluster

    Scenario:
        1. Revert snapshot with simple neutron gre env
        2. Add raise exception to docker_engine.py file
        3. Run upgrade on master
        4. Check that rollback starts automatically
        5. Check that cluster was not upgraded and run OSTF
        6. Add 1 cinder node and re-deploy cluster
        7. Run OSTF

    """
    if not self.env.get_virtual_environment().has_snapshot(
            'deploy_neutron_gre'):
        raise SkipTest()

    self.env.revert_snapshot("deploy_neutron_gre")
    cluster_id = self.fuel_web.get_last_created_cluster()
    remote = self.env.get_ssh_to_remote_by_name('slave-01')
    expected_kernel = UpgradeFuelMaster.get_slave_kernel(remote)
    checkers.upload_tarball(self.env.get_admin_remote(),
                            hlp_data.TARBALL_PATH, '/var')
    checkers.check_tarball_exists(self.env.get_admin_remote(),
                                  os.path.basename(hlp_data.TARBALL_PATH),
                                  '/var')
    checkers.untar(self.env.get_admin_remote(),
                   os.path.basename(hlp_data.TARBALL_PATH), '/var')
    # we expect 255 exit code here because upgrade failed
    # and exit status is 255
    checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh',
                        password=hlp_data.KEYSTONE_CREDS['password'],
                        rollback=True, exit_code=255)
    checkers.wait_rollback_is_done(self.env.get_admin_remote(), 3000)
    checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                       hlp_data.UPGRADE_FUEL_TO,
                                       hlp_data.UPGRADE_FUEL_FROM)
    logger.debug("all containers are ok")
    _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
        self.env.nodes().slaves[0]), timeout=120)
    logger.debug("all services are up now")
    self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:3])
    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
    self.fuel_web.run_ostf(cluster_id=cluster_id)
    self.env.bootstrap_nodes(self.env.nodes().slaves[3:4])
    self.fuel_web.update_nodes(cluster_id, {'slave-04': ['cinder']},
                               True, False)
    self.fuel_web.deploy_cluster_wait(cluster_id)
    if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
        remote = self.env.get_ssh_to_remote_by_name('slave-04')
        kernel = UpgradeFuelMaster.get_slave_kernel(remote)
        checkers.check_kernel(kernel, expected_kernel)
    self.fuel_web.run_ostf(cluster_id=cluster_id)
    self.env.make_snapshot("rollback_automatic_simple")

def rollback_automatically_ha(self):
    """Rollback automatically HA deployed cluster

    Scenario:
        1. Revert snapshot with Neutron GRE HA 6.1 env
        2. Add raise exception to openstack.py file
        3. Run upgrade on master
        4. Check that rollback starts automatically
        5. Check that cluster was not upgraded
        6. Run network verification
        7. Run OSTF
        8. Add 1 cinder node and re-deploy cluster
        9. Run OSTF

    """
    # (ddmitriev)TODO: change the snapshot name to actual when reverting 7.0
    if not self.env.d_env.has_snapshot('deploy_neutron_gre_ha'):
        raise SkipTest()

    self.env.revert_snapshot("deploy_neutron_gre_ha")
    cluster_id = self.fuel_web.get_last_created_cluster()

    checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                            hlp_data.TARBALL_PATH, '/var')
    checkers.check_file_exists(self.env.d_env.get_admin_remote(),
                               os.path.join('/var', os.path.basename(
                                   hlp_data.TARBALL_PATH)))
    checkers.untar(self.env.d_env.get_admin_remote(),
                   os.path.basename(hlp_data.TARBALL_PATH), '/var')
    # we expect 255 exit code here because upgrade failed
    # and exit status is 255
    checkers.run_script(self.env.d_env.get_admin_remote(), '/var',
                        'upgrade.sh',
                        password=hlp_data.KEYSTONE_CREDS['password'],
                        rollback=True, exit_code=255)
    checkers.wait_rollback_is_done(self.env.d_env.get_admin_remote(), 3000)
    checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                       hlp_data.UPGRADE_FUEL_TO,
                                       hlp_data.UPGRADE_FUEL_FROM)
    logger.debug("all containers are ok")
    _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
        self.env.d_env.nodes().slaves[0]), timeout=8 * 60)
    logger.debug("all services are up now")
    self.fuel_web.wait_nodes_get_online_state(
        self.env.d_env.nodes().slaves[:5])
    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
    self.fuel_web.verify_network(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id,
                           test_sets=['ha', 'smoke', 'sanity'])
    self.env.bootstrap_nodes(
        self.env.d_env.nodes().slaves[5:6])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-06': ['cinder']},
        True, False
    )
    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id,
                           test_sets=['ha', 'smoke', 'sanity'])

    self.env.make_snapshot("rollback_automatically_ha")

def upgrade_fuel_after_rollback(self):
    """Upgrade Fuel after rollback and deploy new cluster

    Scenario:
        1. Revert deploy_neutron_gre snapshot with 6.1 env
        2. Upgrade with rollback
        3. Run OSTF
        4. Run network verification
        5. Upgrade fuel master
        6. Check that upgrade was successful
        7. Deploy 6.1 cluster with 3 nodes and neutron vlan
        8. Run OSTF for new cluster
        9. Run network verification

    """
    # (ddmitriev)TODO: change the snapshot name to actual when reverting 7.0
    if not self.env.d_env.has_snapshot('deploy_neutron_gre'):
        raise SkipTest()

    self.env.revert_snapshot("deploy_neutron_gre")

    available_releases_before = self.fuel_web.get_releases_list_for_os(
        release_name=hlp_data.OPENSTACK_RELEASE)

    # `remote` is kept as the factory method; each call below re-obtains
    # the admin remote instead of reusing one client across restarts.
    remote = self.env.d_env.get_admin_remote
    cluster_id = self.fuel_web.get_last_created_cluster()
    checkers.upload_tarball(remote(), hlp_data.TARBALL_PATH, '/var')
    checkers.check_file_exists(remote(),
                               os.path.join('/var', os.path.basename(
                                   hlp_data.TARBALL_PATH)))
    checkers.untar(remote(), os.path.basename(hlp_data.TARBALL_PATH),
                   '/var')

    # Upgrade with rollback
    keystone_pass = hlp_data.KEYSTONE_CREDS['password']
    checkers.run_script(remote(), '/var', 'upgrade.sh',
                        password=keystone_pass,
                        rollback=True, exit_code=255)
    checkers.wait_rollback_is_done(remote(), 3000)
    checkers.check_upgraded_containers(remote(),
                                       hlp_data.UPGRADE_FUEL_TO,
                                       hlp_data.UPGRADE_FUEL_FROM)
    logger.debug("all containers are ok")
    _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
        self.env.d_env.nodes().slaves[0]), timeout=8 * 60)
    logger.debug("all services are up now")
    self.fuel_web.wait_nodes_get_online_state(
        self.env.d_env.nodes().slaves[:3])
    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
    self.fuel_web.verify_network(cluster_id)
    self.fuel_web.run_ostf(cluster_id,
                           test_sets=['ha', 'smoke', 'sanity'])

    # Upgrade fuel master
    checkers.run_script(remote(), '/var', 'upgrade.sh',
                        password=keystone_pass)
    checkers.wait_upgrade_is_done(remote(), 3000,
                                  phrase='*** UPGRADING MASTER NODE'
                                         ' DONE SUCCESSFULLY')
    checkers.check_upgraded_containers(remote(),
                                       hlp_data.UPGRADE_FUEL_FROM,
                                       hlp_data.UPGRADE_FUEL_TO)
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    self.fuel_web.wait_nodes_get_online_state(
        self.env.d_env.nodes().slaves[:3])
    self.fuel_web.assert_nailgun_upgrade_migration()

    # Deploy new cluster
    available_releases_after = self.fuel_web.get_releases_list_for_os(
        release_name=hlp_data.OPENSTACK_RELEASE)
    added_release = [release_id for release_id in available_releases_after
                     if release_id not in available_releases_before]

    self.env.bootstrap_nodes(
        self.env.d_env.nodes().slaves[3:6])

    new_cluster_id = self.fuel_web.create_cluster(
        name=self.__class__.__name__,
        release_id=added_release[0],
        mode=hlp_data.DEPLOYMENT_MODE,
        settings={
            'net_provider': 'neutron',
            'net_segment_type': hlp_data.NEUTRON_SEGMENT['vlan']
        }
    )
    self.fuel_web.update_nodes(
        new_cluster_id, {
            'slave-04': ['controller'],
            'slave-05': ['compute'],
            'slave-06': ['cinder']
        }
    )
    self.fuel_web.run_network_verify(new_cluster_id)
    self.fuel_web.deploy_cluster_wait(new_cluster_id)
    self.fuel_web.run_ostf(new_cluster_id,
                           test_sets=['ha', 'smoke', 'sanity'])
    self.fuel_web.run_network_verify(new_cluster_id)

    self.env.make_snapshot("upgrade_fuel_after_rollback")

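# A tiny sketch of the release-diff step used above (hypothetical helper;
# assumes get_releases_list_for_os returns a plain list of release IDs):
# the newly added release is whatever appears after the upgrade but was
# not present before it.
def newly_added_releases(before, after):
    # Keep the order of `after`; drop every ID that existed pre-upgrade.
    return [release_id for release_id in after if release_id not in before]
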
def rollback_automatically_simple_env(self):
    """Rollback automatically simple deployed cluster

    Scenario:
        1. Revert snapshot with simple neutron gre env
        2. Add wrong release entry to releases metadata.yaml file
        3. Run upgrade on master
        4. Check that rollback starts automatically
        5. Check that cluster was not upgraded and run OSTF
        6. Add 1 cinder node and re-deploy cluster
        7. Run OSTF

    """
    if not self.env.get_virtual_environment().has_snapshot(
            'deploy_neutron_gre'):
        raise SkipTest()

    self.env.revert_snapshot("deploy_neutron_gre")
    cluster_id = self.fuel_web.get_last_created_cluster()
    remote = self.env.get_ssh_to_remote_by_name('slave-01')
    expected_kernel = UpgradeFuelMaster.get_slave_kernel(remote)
    checkers.upload_tarball(self.env.get_admin_remote(),
                            hlp_data.TARBALL_PATH, '/var')
    checkers.check_tarball_exists(self.env.get_admin_remote(),
                                  os.path.basename(hlp_data.TARBALL_PATH),
                                  '/var')
    checkers.untar(self.env.get_admin_remote(),
                   os.path.basename(hlp_data.TARBALL_PATH), '/var')
    # Corrupt the releases metadata ("2i <text>" is a sed insert command:
    # add a bogus entry before line 2) so the upgrade fails and rolls back.
    self.fuel_web.modify_python_file(self.env.get_admin_remote(),
                                     "2i \ \ \ \ 2014.2-6.0: blah-blah",
                                     '/var/upgrade/releases/'
                                     'metadata.yaml')
    # we expect 255 exit code here because upgrade failed
    # and exit status is 255
    checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh',
                        password=hlp_data.KEYSTONE_CREDS['password'],
                        rollback=True, exit_code=255)
    checkers.wait_rollback_is_done(self.env.get_admin_remote(), 3000)
    checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                       hlp_data.UPGRADE_FUEL_TO,
                                       hlp_data.UPGRADE_FUEL_FROM)
    logger.debug("all containers are ok")
    _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
        self.env.nodes().slaves[0]), timeout=120)
    logger.debug("all services are up now")
    self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:3])
    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
    self.fuel_web.run_ostf(cluster_id=cluster_id)
    self.env.bootstrap_nodes(self.env.nodes().slaves[3:4])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-04': ['cinder']},
        True, False
    )
    self.fuel_web.deploy_cluster_wait(cluster_id)
    if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
        remote = self.env.get_ssh_to_remote_by_name('slave-04')
        kernel = UpgradeFuelMaster.get_slave_kernel(remote)
        checkers.check_kernel(kernel, expected_kernel)
    self.fuel_web.run_ostf(cluster_id=cluster_id)
    self.env.make_snapshot("rollback_automatic_simple")