def wrapper(*args, **kwargs): try: result = func(*args, **kwargs) except SkipTest: raise SkipTest() except Exception as test_exception: exc_trace = sys.exc_traceback name = 'error_%s' % func.__name__ description = "Failed in method '%s'." % func.__name__ if args[0].env is not None: try: create_diagnostic_snapshot(args[0].env, "fail", name) except: logger.error("Fetching of diagnostic snapshot failed: {0}". format(traceback.format_exc())) try: with args[0].env.d_env.get_admin_remote()\ as admin_remote: pull_out_logs_via_ssh(admin_remote, name) except: logger.error("Fetching of raw logs failed: {0}". format(traceback.format_exc())) finally: logger.debug(args) try: args[0].env.make_snapshot(snapshot_name=name[-50:], description=description, is_make=True) except: logger.error("Error making the environment snapshot:" " {0}".format(traceback.format_exc())) raise test_exception, None, exc_trace return result
def upgrade_simple_env(self):
    """Upgrade simple deployed cluster with ceph

    Scenario:
        1. Revert snapshot with simple ceph env
        2. Run upgrade on master
        3. Check that upgrade was successful
        4. Add another compute node
        5. Re-deploy cluster
        6. Run OSTF

    """
    if not self.env.get_virtual_environment().has_snapshot(
            'ceph_multinode_compact'):
        raise SkipTest()
    self.env.revert_snapshot("ceph_multinode_compact")
    cluster_id = self.fuel_web.get_last_created_cluster()

    # Deliver and unpack the upgrade tarball on the master node.
    tarball_name = os.path.basename(hlp_data.TARBALL_PATH)
    checkers.upload_tarball(self.env.get_admin_remote(),
                            hlp_data.TARBALL_PATH, '/var')
    checkers.check_tarball_exists(self.env.get_admin_remote(),
                                  tarball_name, '/var')
    checkers.untar(self.env.get_admin_remote(), tarball_name, '/var')

    # Kick off the upgrade script and wait until it reports success.
    checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh',
                        password=hlp_data.KEYSTONE_CREDS['password'])
    checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 3000,
                                  phrase='*** UPGRADE DONE SUCCESSFULLY')
    checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                       hlp_data.UPGRADE_FUEL_FROM,
                                       hlp_data.UPGRADE_FUEL_TO)

    # Verify the upgraded master and the pre-existing cluster state.
    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
    self.fuel_web.assert_nailgun_upgrade_migration()

    # Scale out with one more compute node and redeploy.
    self.env.bootstrap_nodes(self.env.nodes().slaves[3:4])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-04': ['compute']},
        True, False
    )
    self.fuel_web.deploy_cluster_wait(cluster_id)

    os_conn = os_actions.OpenStackActions(
        self.fuel_web.get_nailgun_node_by_name('slave-01')['ip'],
        user='******', tenant='ceph1', passwd='ceph1')
    self.fuel_web.assert_cluster_ready(
        os_conn, smiles_count=10, networks_count=1, timeout=300)
    self.fuel_web.run_ostf(cluster_id=cluster_id)

    create_diagnostic_snapshot(self.env, "pass", "upgrade_simple_env")
    self.env.make_snapshot("upgrade_simple")
def upgrade_ha_one_controller(self):
    """Upgrade ha one controller deployed cluster with ceph

    Scenario:
        1. Revert snapshot with ha one controller ceph env
        2. Run upgrade on master
        3. Check that upgrade was successful
        4. Run network verification
        5. Run OSTF
        6. Add another compute node
        7. Re-deploy cluster
        8. Run OSTF

    """
    if not self.env.d_env.has_snapshot('ceph_ha_one_controller_compact'):
        raise SkipTest()
    self.env.revert_snapshot('ceph_ha_one_controller_compact')
    cluster_id = self.fuel_web.get_last_created_cluster()

    # Remember the kernel on slave-01 so a newly provisioned Ubuntu
    # node can later be checked against it.
    node_ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
    with self.env.d_env.get_ssh_to_remote(node_ip) as remote:
        expected_kernel = self.get_slave_kernel(remote)

    self.env.admin_actions.upgrade_master_node()

    # Post-upgrade verification of the master and existing cluster.
    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    self.fuel_web.wait_nodes_get_online_state(
        self.env.d_env.nodes().slaves[:3])
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
    self.fuel_web.assert_nailgun_upgrade_migration()
    self.fuel_web.verify_network(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id,
                           test_sets=['ha', 'smoke', 'sanity'])

    # Scale out with one extra compute node and redeploy.
    self.env.bootstrap_nodes(
        self.env.d_env.nodes().slaves[3:4])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-04': ['compute']},
        True, False
    )
    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id,
                           test_sets=['ha', 'smoke', 'sanity'])

    # On Ubuntu, the new node must boot the same kernel as slave-01.
    if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
        node_ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']
        with self.env.d_env.get_ssh_to_remote(node_ip) as remote:
            kernel = self.get_slave_kernel(remote)
        checkers.check_kernel(kernel, expected_kernel)

    create_diagnostic_snapshot(
        self.env, "pass", "upgrade_ha_one_controller")
    self.env.make_snapshot("upgrade_ha_one_controller")
def upgrade_simple_env(self):
    """Upgrade simple deployed cluster

    Scenario:
        1. Revert snapshot with simple cinder env
        2. Run upgrade on master
        3. Check that upgrade was successful
        4. Add another cinder node
        5. Re-deploy cluster
        6. Run OSTF

    """
    if not self.env.get_virtual_environment().has_snapshot(
            'deploy_simple_cinder'):
        raise SkipTest()
    self.env.revert_snapshot("deploy_simple_cinder")
    cluster_id = self.fuel_web.get_last_created_cluster()

    # Deliver and unpack the upgrade tarball on the master node.
    tarball_name = os.path.basename(hlp_data.UPGRADE_TARBALL_PATH)
    checkers.upload_tarball(self.env.get_admin_remote(),
                            hlp_data.UPGRADE_TARBALL_PATH, '/var')
    checkers.check_tarball_exists(self.env.get_admin_remote(),
                                  tarball_name, '/var')
    checkers.untar(self.env.get_admin_remote(), tarball_name, '/var')

    # Run the upgrade script and wait until it reports success.
    checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh')
    checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 1500,
                                  phrase='*** UPGRADE DONE SUCCESSFULLY')
    checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                       hlp_data.UPGRADE_FUEL_FROM,
                                       hlp_data.UPGRADE_FUEL_TO)

    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
    self.fuel_web.assert_nailgun_upgrade_migration()

    # NOTE(review): the scenario says "add another cinder node" but the
    # role assigned here is 'compute' -- confirm which is intended.
    self.env.bootstrap_nodes(self.env.nodes().slaves[3:4])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-04': ['compute']},
        True, False
    )
    self.fuel_web.deploy_cluster_wait(cluster_id)

    # NOTE(review): a node name is passed here where other variants pass
    # an OpenStackActions instance -- verify against the
    # assert_cluster_ready() signature in this revision.
    self.fuel_web.assert_cluster_ready(
        'slave-01', smiles_count=8, networks_count=1, timeout=300)
    self.fuel_web.run_ostf(cluster_id=cluster_id)

    create_diagnostic_snapshot(self.env, "pass", "upgrade_simple_env")
    self.env.make_snapshot("upgrade_simple")
def wrapper(*args, **kwargs): try: result = func(*args, **kwargs) except SkipTest: raise SkipTest() except Exception as test_exception: exc_trace = sys.exc_traceback name = 'error_%s' % func.__name__ case_name = getattr(func, '_base_class', None) step_num = getattr(func, '_step_num', None) config_name = getattr(func, '_config_case_group', None) description = "Failed in method '%s'." % func.__name__ if args[0].env is not None: try: create_diagnostic_snapshot(args[0].env, "fail", name) except: logger.error("Fetching of diagnostic snapshot failed: {0}". format(traceback.format_exc())) try: with args[0].env.d_env.get_admin_remote()\ as admin_remote: pull_out_logs_via_ssh(admin_remote, name) except: logger.error("Fetching of raw logs failed: {0}". format(traceback.format_exc())) finally: logger.debug(args) try: if all([case_name, step_num, config_name]): _hash = hashlib.sha256(config_name) _hash = _hash.hexdigest()[:8] snapshot_name = "{case}_{config}_{step}".format( case=case_name, config=_hash, step="Step{:03d}".format(step_num) ) else: snapshot_name = name[-50:] args[0].env.make_snapshot(snapshot_name=snapshot_name, description=description, is_make=True) except: logger.error("Error making the environment snapshot:" " {0}".format(traceback.format_exc())) raise test_exception, None, exc_trace return result
def wrapper(*args, **kwargs): try: result = func(*args, **kwargs) except SkipTest: raise SkipTest() except Exception as test_exception: exc_trace = sys.exc_traceback name = 'error_%s' % func.__name__ case_name = getattr(func, '_base_class', None) step_num = getattr(func, '_step_num', None) config_name = getattr(func, '_config_case_group', None) description = "Failed in method '%s'." % func.__name__ if args[0].env is not None: try: create_diagnostic_snapshot(args[0].env, "fail", name) except: logger.error( "Fetching of diagnostic snapshot failed: {0}".format( traceback.format_exc())) try: with args[0].env.d_env.get_admin_remote()\ as admin_remote: pull_out_logs_via_ssh(admin_remote, name) except: logger.error("Fetching of raw logs failed: {0}".format( traceback.format_exc())) finally: logger.debug(args) try: if all([case_name, step_num, config_name]): _hash = hashlib.sha256(config_name) _hash = _hash.hexdigest()[:8] snapshot_name = "{case}_{config}_{step}".format( case=case_name, config=_hash, step="Step{:03d}".format(step_num)) else: snapshot_name = name[-50:] args[0].env.make_snapshot(snapshot_name=snapshot_name, description=description, is_make=True) except: logger.error("Error making the environment snapshot:" " {0}".format(traceback.format_exc())) raise test_exception, None, exc_trace return result
def make_diagnostic_snapshot(self, status, name):
    """Collect a diagnostic snapshot of this instance's environment.

    Thin delegate: forwards *status* and *name* to
    ``create_diagnostic_snapshot`` together with ``self.env``.
    """
    create_diagnostic_snapshot(self.env, status, name)
def upgrade_ha_one_controller_env(self):
    """Upgrade ha one controller deployed cluster with ceph

    Scenario:
        1. Revert snapshot with ha one controller ceph env
        2. Run upgrade on master
        3. Check that upgrade was successful
        4. Run network verification
        5. Run OSTF
        6. Add another compute node
        7. Re-deploy cluster
        8. Run OSTF

    """
    # For upgrade jobs *from* 6.1, change snapshot name to
    # "ceph_ha_one_controller_compact"
    if not self.env.d_env.has_snapshot('ceph_multinode_compact'):
        raise SkipTest()
    self.env.revert_snapshot("ceph_multinode_compact")
    cluster_id = self.fuel_web.get_last_created_cluster()

    _ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
    # Use the remote as a context manager so the SSH connection is
    # closed; the previous bare get_ssh_to_remote() call leaked it.
    with self.env.d_env.get_ssh_to_remote(_ip) as remote:
        expected_kernel = self.get_slave_kernel(remote)

    # Deliver and unpack the upgrade tarball on the master node.
    tarball_name = os.path.basename(hlp_data.TARBALL_PATH)
    checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                            hlp_data.TARBALL_PATH, '/var')
    checkers.check_tarball_exists(self.env.d_env.get_admin_remote(),
                                  tarball_name, '/var')
    checkers.untar(self.env.d_env.get_admin_remote(),
                   tarball_name, '/var')
    # Run the upgrade script and wait until it reports success.
    checkers.run_script(self.env.d_env.get_admin_remote(), '/var',
                        'upgrade.sh',
                        password=hlp_data.KEYSTONE_CREDS['password'])
    checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
                                  phrase='*** UPGRADING MASTER NODE'
                                         ' DONE SUCCESSFULLY')
    checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                       hlp_data.UPGRADE_FUEL_FROM,
                                       hlp_data.UPGRADE_FUEL_TO)
    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    self.fuel_web.wait_nodes_get_online_state(
        self.env.d_env.nodes().slaves[:3])
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
    self.fuel_web.assert_nailgun_upgrade_migration()
    self.fuel_web.verify_network(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id)
    # Scale out with one more compute node and redeploy.
    self.env.bootstrap_nodes(
        self.env.d_env.nodes().slaves[3:4])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-04': ['compute']},
        True, False
    )
    self.fuel_web.deploy_cluster_wait(cluster_id)
    os_conn = os_actions.OpenStackActions(
        self.fuel_web.get_public_vip(cluster_id),
        user='******', tenant='ceph1', passwd='ceph1')
    self.fuel_web.assert_cluster_ready(
        os_conn,
        smiles_count=7 if hlp_data.NEUTRON_ENABLE else 10,
        networks_count=2 if hlp_data.NEUTRON_ENABLE else 1,
        timeout=300)
    self.fuel_web.run_ostf(cluster_id=cluster_id)
    # On Ubuntu, the new node must boot the same kernel as slave-01.
    if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
        _ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']
        with self.env.d_env.get_ssh_to_remote(_ip) as remote:
            kernel = self.get_slave_kernel(remote)
        checkers.check_kernel(kernel, expected_kernel)
    create_diagnostic_snapshot(
        self.env, "pass", "upgrade_ha_one_controller_env")
    self.env.make_snapshot("upgrade_ha_one_controller")
def upgrade_simple_env(self):
    """Upgrade simple deployed cluster with ceph

    Scenario:
        1. Revert snapshot with simple ceph env
        2. Run upgrade on master
        3. Check that upgrade was successful
        4. Add another compute node
        5. Re-deploy cluster
        6. Run OSTF

    """
    if not self.env.get_virtual_environment().has_snapshot(
            'ceph_multinode_compact'):
        raise SkipTest()
    self.env.revert_snapshot("ceph_multinode_compact")
    cluster_id = self.fuel_web.get_last_created_cluster()

    # Record the kernel of an existing slave so the newly added Ubuntu
    # node can be checked against it after redeployment.
    # NOTE(review): the SSH remote is never explicitly closed here --
    # consider a context manager if this API supports one.
    remote = self.env.get_ssh_to_remote_by_name('slave-01')
    expected_kernel = self.get_slave_kernel(remote)

    # Deliver and unpack the upgrade tarball on the master node.
    tarball_name = os.path.basename(hlp_data.TARBALL_PATH)
    checkers.upload_tarball(self.env.get_admin_remote(),
                            hlp_data.TARBALL_PATH, '/var')
    checkers.check_tarball_exists(self.env.get_admin_remote(),
                                  tarball_name, '/var')
    checkers.untar(self.env.get_admin_remote(), tarball_name, '/var')

    # Run the upgrade script and wait until it reports success.
    checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh',
                        password=hlp_data.KEYSTONE_CREDS['password'])
    checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 3000,
                                  phrase='*** UPGRADE DONE SUCCESSFULLY')
    checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                       hlp_data.UPGRADE_FUEL_FROM,
                                       hlp_data.UPGRADE_FUEL_TO)

    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:3])
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
    self.fuel_web.assert_nailgun_upgrade_migration()

    # Scale out with one more compute node and redeploy.
    self.env.bootstrap_nodes(self.env.nodes().slaves[3:4])
    self.fuel_web.update_nodes(cluster_id, {'slave-04': ['compute']},
                               True, False)
    self.fuel_web.deploy_cluster_wait(cluster_id)

    os_conn = os_actions.OpenStackActions(
        self.fuel_web.get_nailgun_node_by_name('slave-01')['ip'],
        user='******', tenant='ceph1', passwd='ceph1')
    self.fuel_web.assert_cluster_ready(os_conn, smiles_count=10,
                                       networks_count=1, timeout=300)
    self.fuel_web.run_ostf(cluster_id=cluster_id)

    # On Ubuntu, the new node must boot the same kernel as slave-01.
    if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
        remote = self.env.get_ssh_to_remote_by_name('slave-04')
        kernel = self.get_slave_kernel(remote)
        checkers.check_kernel(kernel, expected_kernel)

    create_diagnostic_snapshot(self.env, "pass", "upgrade_simple_env")
    self.env.make_snapshot("upgrade_simple")
def upgrade_ha_one_controller(self):
    """Upgrade ha one controller deployed cluster with ceph

    Scenario:
        1. Revert snapshot with ha one controller ceph env
        2. Run upgrade on master
        3. Check that upgrade was successful
        4. Run network verification
        5. Run OSTF
        6. Add another compute node
        7. Re-deploy cluster
        8. Run OSTF

    """
    if not self.env.d_env.has_snapshot('ceph_ha_one_controller_compact'):
        raise SkipTest()
    self.env.revert_snapshot('ceph_ha_one_controller_compact')
    cluster_id = self.fuel_web.get_last_created_cluster()

    _ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
    # Use the remote as a context manager so the SSH connection is
    # closed; the previous bare get_ssh_to_remote() call leaked it.
    with self.env.d_env.get_ssh_to_remote(_ip) as remote:
        expected_kernel = self.get_slave_kernel(remote)

    # Deliver and unpack the upgrade tarball on the master node.
    tarball_name = os.path.basename(hlp_data.TARBALL_PATH)
    checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                            hlp_data.TARBALL_PATH, '/var')
    checkers.check_file_exists(self.env.d_env.get_admin_remote(),
                               os.path.join('/var', tarball_name))
    checkers.untar(self.env.d_env.get_admin_remote(),
                   tarball_name, '/var')
    # Run the upgrade script and wait until it reports success.
    checkers.run_script(self.env.d_env.get_admin_remote(), '/var',
                        'upgrade.sh',
                        password=hlp_data.KEYSTONE_CREDS['password'])
    checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
                                  phrase='*** UPGRADING MASTER NODE'
                                         ' DONE SUCCESSFULLY')
    checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                       hlp_data.UPGRADE_FUEL_FROM,
                                       hlp_data.UPGRADE_FUEL_TO)
    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    self.fuel_web.wait_nodes_get_online_state(
        self.env.d_env.nodes().slaves[:3])
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
    self.fuel_web.assert_nailgun_upgrade_migration()
    self.fuel_web.verify_network(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id,
                           test_sets=['ha', 'smoke', 'sanity'])
    # Scale out with one more compute node and redeploy.
    self.env.bootstrap_nodes(
        self.env.d_env.nodes().slaves[3:4])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-04': ['compute']},
        True, False
    )
    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id,
                           test_sets=['ha', 'smoke', 'sanity'])
    # On Ubuntu, the new node must boot the same kernel as slave-01.
    if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
        _ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']
        with self.env.d_env.get_ssh_to_remote(_ip) as remote:
            kernel = self.get_slave_kernel(remote)
        checkers.check_kernel(kernel, expected_kernel)
    create_diagnostic_snapshot(
        self.env, "pass", "upgrade_ha_one_controller")
    self.env.make_snapshot("upgrade_ha_one_controller")
def make_diagnostic_snapshot(self, status, name):
    """Collect a diagnostic snapshot of this instance's environment.

    The environment is resumed first so the snapshot is taken from a
    running state; then *status* and *name* are forwarded to
    ``create_diagnostic_snapshot`` together with ``self.env``.
    """
    self.env.resume_environment()
    create_diagnostic_snapshot(self.env, status, name)