def backup_restore_master_base(self):
    """Backup/restore master node

    Scenario:
        1. Revert snapshot "empty"
        2. Backup master
        3. Check backup
        4. Restore master
        5. Check restore
        6. Check iptables

    Duration 30m
    """
    self.env.revert_snapshot("empty")
    # Backup phase: one SSH session for backup + verification
    with self.env.d_env.get_admin_remote() as remote:
        self.fuel_web.backup_master(remote)
        checkers.backup_check(remote)
    # Restore must complete within 10 minutes or the test fails
    with RunLimit(
            seconds=60 * 10,
            error_message="'dockerctl restore' "
                          "ran longer than 600 sec"):
        self.fuel_web.restore_master(self.ssh_manager.admin_ip)
    self.fuel_web.restore_check_nailgun_api()
    checkers.restore_check_sum(self.ssh_manager.admin_ip)
    checkers.iptables_check(self.ssh_manager.admin_ip)
def backup_restore_master_base(self):
    """Backup/restore master node

    Scenario:
        1. Revert snapshot "empty"
        2. Backup master
        3. Check backup
        4. Restore master
        5. Check restore
        6. Check iptables

    Duration 30m
    """
    self.env.revert_snapshot("empty")
    with self.env.d_env.get_admin_remote() as remote:
        self.fuel_web.backup_master(remote)
        checkers.backup_check(remote)
        # Restore must complete within 10 minutes or the test fails
        with RunLimit(seconds=60 * 10,
                      error_message="'dockerctl restore' "
                                    "ran longer than 600 sec"):
            self.fuel_web.restore_master(remote)
        self.fuel_web.restore_check_nailgun_api()
        checkers.restore_check_sum(remote)
        checkers.iptables_check(remote)
def ha_one_controller_backup_restore(self):
    """Backup/restore master node with one controller in cluster

    Scenario:
        1. Revert snapshot "deploy_ha_one_controller_backup_restore"
        2. Backup master
        3. Check backup
        4. Run OSTF
        5. Add 1 node with compute role
        6. Restore master
        7. Check restore
        8. Run OSTF

    Duration 35m
    """
    self.env.revert_snapshot("deploy_ha_one_controller_backup_restore")
    cluster_id = self.fuel_web.get_last_created_cluster()
    os_conn = os_actions.OpenStackActions(
        self.fuel_web.get_public_vip(cluster_id),
        'neutronOneController', 'neutronOneController',
        'neutronOneController')
    self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5)
    with self.env.d_env.get_admin_remote() as remote:
        # Execute master node backup
        self.fuel_web.backup_master(remote)
        # Check created backup
        checkers.backup_check(remote)
    # Add a pending node after the backup; it must vanish after restore
    self.fuel_web.update_nodes(
        cluster_id, {'slave-03': ['compute']}, True, False)
    assert_equal(
        3, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
    with self.env.d_env.get_admin_remote() as remote:
        with RunLimit(
                seconds=60 * 10,
                error_message="'dockerctl restore' "
                              "ran longer than 600 sec"):
            self.fuel_web.restore_master(remote)
        checkers.restore_check_sum(remote)
        self.fuel_web.restore_check_nailgun_api()
        checkers.iptables_check(remote)
    # The node added after the backup must be gone
    assert_equal(
        2, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
    self.fuel_web.update_nodes(
        cluster_id, {'slave-03': ['compute']}, True, False)
    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id)
    self.env.make_snapshot("ha_one_controller_backup_restore")
def neutron_tun_ha_backup_restore(self):
    """Backup/restore master node with cluster in ha mode

    Scenario:
        1. Revert snapshot "deploy_neutron_tun_ha_backup_restore"
        2. Backup master
        3. Check backup
        4. Run OSTF
        5. Add 1 node with compute role
        6. Restore master
        7. Check restore
        8. Run OSTF

    Duration 50m
    """
    self.env.revert_snapshot("deploy_neutron_tun_ha_backup_restore")
    cluster_id = self.fuel_web.get_last_created_cluster()
    cluster = self.fuel_web.client.get_cluster(cluster_id)
    assert_equal(str(cluster['net_provider']), 'neutron')
    os_conn = os_actions.OpenStackActions(
        self.fuel_web.get_public_vip(cluster_id),
        'haTun', 'haTun', 'haTun')
    self.fuel_web.check_fixed_network_cidr(cluster_id, os_conn)
    with self.env.d_env.get_admin_remote() as remote:
        self.fuel_web.backup_master(remote)
        checkers.backup_check(remote)
    # Add a pending node after the backup; it must vanish after restore
    self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[5:6])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-06': ['compute']}, True, False)
    assert_equal(
        6, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
    with self.env.d_env.get_admin_remote() as remote:
        with RunLimit(seconds=60 * 10,
                      error_message="'dockerctl restore' "
                                    "ran longer than 600 sec"):
            self.fuel_web.restore_master(remote)
        checkers.restore_check_sum(remote)
        self.fuel_web.restore_check_nailgun_api()
        checkers.iptables_check(remote)
    # The node added after the backup must be gone
    assert_equal(
        5, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
    self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[5:6])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-06': ['compute']}, True, False)
    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.fuel_web.run_ostf(
        cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'])
    self.env.make_snapshot("neutron_tun_ha_backup_restore")
def backup_restore_ha_flat(self):
    """Backup/restore master node with cluster in ha mode

    Scenario:
        1. Revert snapshot "deploy_ha_flat"
        2. Backup master
        3. Check backup
        4. Run OSTF
        5. Add 1 node with compute role
        6. Restore master
        7. Check restore
        8. Run OSTF

    Duration 50m
    """
    self.env.revert_snapshot("deploy_ha_flat")
    cluster_id = self.fuel_web.get_last_created_cluster()
    os_conn = os_actions.OpenStackActions(
        self.fuel_web.get_public_vip(cluster_id),
        'novaHaFlat', 'novaHaFlat', 'novaHaFlat')
    self.fuel_web.assert_cluster_ready(
        os_conn, smiles_count=16, networks_count=1, timeout=300)
    # Use one managed remote per phase instead of opening a new (and
    # never closed) SSH connection for every call
    with self.env.d_env.get_admin_remote() as remote:
        self.fuel_web.backup_master(remote)
        checkers.backup_check(remote)
    # Add a pending node after the backup; it must vanish after restore
    self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[5:6])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-06': ['compute']}, True, False)
    assert_equal(
        6, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
    with self.env.d_env.get_admin_remote() as remote:
        self.fuel_web.restore_master(remote)
        checkers.restore_check_sum(remote)
        self.fuel_web.restore_check_nailgun_api(remote)
        checkers.iptables_check(remote)
    # The node added after the backup must be gone
    assert_equal(
        5, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
    self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[5:6])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-06': ['compute']}, True, False)
    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.fuel_web.run_ostf(
        cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'])
    self.env.make_snapshot("backup_restore_ha_flat")
def ha_one_controller_backup_restore(self):
    """Backup/restore master node with one controller in cluster

    Scenario:
        1. Revert snapshot "deploy_ha_one_controller_backup_restore"
        2. Backup master
        3. Check backup
        4. Run OSTF
        5. Add 1 node with compute role
        6. Restore master
        7. Check restore
        8. Run OSTF

    Duration 35m
    """
    self.env.revert_snapshot("deploy_ha_one_controller_backup_restore")
    cluster_id = self.fuel_web.get_last_created_cluster()
    os_conn = os_actions.OpenStackActions(
        self.fuel_web.get_public_vip(cluster_id),
        'neutronOneController', 'neutronOneController',
        'neutronOneController')
    self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5)
    with self.env.d_env.get_admin_remote() as remote:
        # Execute master node backup
        self.fuel_web.backup_master(remote)
        # Check created backup
        checkers.backup_check(remote)
    # Node added after backup should disappear once master is restored
    self.fuel_web.update_nodes(
        cluster_id, {'slave-03': ['compute']}, True, False)
    assert_equal(
        3, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
    with self.env.d_env.get_admin_remote() as remote:
        with RunLimit(seconds=60 * 10,
                      error_message="'dockerctl restore' "
                                    "ran longer than 600 sec"):
            self.fuel_web.restore_master(remote)
        checkers.restore_check_sum(remote)
        self.fuel_web.restore_check_nailgun_api()
        checkers.iptables_check(remote)
    assert_equal(
        2, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
    self.fuel_web.update_nodes(
        cluster_id, {'slave-03': ['compute']}, True, False)
    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id)
    self.env.make_snapshot("ha_one_controller_backup_restore")
def ha_one_controller_backup_restore(self):
    """Backup/restore master node with cluster in ha mode

    Scenario:
        1. Revert snapshot "deploy_ha_one_controller_flat"
        2. Backup master
        3. Check backup
        4. Run OSTF
        5. Add 1 node with compute role
        6. Restore master
        7. Check restore
        8. Run OSTF

    Duration 35m
    """
    self.env.revert_snapshot("deploy_ha_one_controller_flat")
    cluster_id = self.fuel_web.get_last_created_cluster()
    os_conn = os_actions.OpenStackActions(
        self.fuel_web.get_public_vip(cluster_id),
        'novaSimpleFlat', 'novaSimpleFlat', 'novaSimpleFlat')
    self.fuel_web.assert_cluster_ready(
        os_conn, smiles_count=6, networks_count=1, timeout=300)
    # Use one managed remote per phase instead of opening a new (and
    # never closed) SSH connection for every call
    with self.env.d_env.get_admin_remote() as remote:
        # Execute master node backup
        self.fuel_web.backup_master(remote)
        # Check created backup
        checkers.backup_check(remote)
    # Node added after backup should disappear once master is restored
    self.fuel_web.update_nodes(
        cluster_id, {'slave-03': ['compute']}, True, False)
    assert_equal(
        3, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
    with self.env.d_env.get_admin_remote() as remote:
        self.fuel_web.restore_master(remote)
        checkers.restore_check_sum(remote)
        self.fuel_web.restore_check_nailgun_api(remote)
        checkers.iptables_check(remote)
    assert_equal(
        2, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
    self.fuel_web.update_nodes(
        cluster_id, {'slave-03': ['compute']}, True, False)
    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id)
    self.env.make_snapshot("ha_one_controller_backup_restore")
def backup_restore_ha_flat(self):
    """Backup/restore master node with cluster in ha mode

    Scenario:
        1. Revert snapshot "deploy_ha_flat"
        2. Backup master
        3. Check backup
        4. Run OSTF
        5. Add 1 node with compute role
        6. Restore master
        7. Check restore
        8. Run OSTF

    Duration 50m
    """
    self.env.revert_snapshot("deploy_ha_flat")
    cluster_id = self.fuel_web.get_last_created_cluster()
    os_conn = os_actions.OpenStackActions(
        self.fuel_web.get_public_vip(cluster_id),
        'novaHaFlat', 'novaHaFlat', 'novaHaFlat')
    self.fuel_web.assert_cluster_ready(
        os_conn, smiles_count=16, networks_count=1, timeout=300)
    # Hoist the admin remote per phase: the original opened a brand-new
    # SSH connection for every single call
    remote = self.env.get_admin_remote()
    self.fuel_web.backup_master(remote)
    checkers.backup_check(remote)
    self.env.bootstrap_nodes(self.env.nodes().slaves[5:6])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-06': ['compute']}, True, False)
    assert_equal(
        6, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
    # Fresh connection for the restore phase
    remote = self.env.get_admin_remote()
    self.fuel_web.restore_master(remote)
    checkers.restore_check_sum(remote)
    self.fuel_web.restore_check_nailgun_api(remote)
    checkers.iptables_check(remote)
    assert_equal(
        5, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
    self.env.bootstrap_nodes(self.env.nodes().slaves[5:6])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-06': ['compute']}, True, False)
    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.fuel_web.run_ostf(
        cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'])
    self.env.make_snapshot("backup_restore_ha_flat")
def ha_one_controller_backup_restore(self):
    """Backup/restore master node with cluster in ha mode

    Scenario:
        1. Revert snapshot "deploy_ha_one_controller_flat"
        2. Backup master
        3. Check backup
        4. Run OSTF
        5. Add 1 node with compute role
        6. Restore master
        7. Check restore
        8. Run OSTF

    Duration 35m
    """
    self.env.revert_snapshot("deploy_ha_one_controller_flat")
    cluster_id = self.fuel_web.get_last_created_cluster()
    os_conn = os_actions.OpenStackActions(
        self.fuel_web.get_public_vip(cluster_id),
        'novaSimpleFlat', 'novaSimpleFlat', 'novaSimpleFlat')
    self.fuel_web.assert_cluster_ready(
        os_conn, smiles_count=6, networks_count=1, timeout=300)
    # Hoist the admin remote per phase: the original opened a brand-new
    # SSH connection for every single call
    remote = self.env.get_admin_remote()
    self.fuel_web.backup_master(remote)
    checkers.backup_check(remote)
    self.fuel_web.update_nodes(
        cluster_id, {'slave-03': ['compute']}, True, False)
    assert_equal(
        3, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
    # Fresh connection for the restore phase
    remote = self.env.get_admin_remote()
    self.fuel_web.restore_master(remote)
    checkers.restore_check_sum(remote)
    self.fuel_web.restore_check_nailgun_api(remote)
    checkers.iptables_check(remote)
    assert_equal(
        2, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
    self.fuel_web.update_nodes(
        cluster_id, {'slave-03': ['compute']}, True, False)
    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id)
    self.env.make_snapshot("ha_one_controller_backup_restore")
def simple_backup_restore(self):
    """Backup/restore master node with cluster in simple mode

    Scenario:
        1. Revert snapshot "deploy_simple_flat"
        2. Backup master
        3. Check backup
        4. Run OSTF
        5. Add 1 node with compute role
        6. Restore master
        7. Check restore
        8. Run OSTF
    """
    self.env.revert_snapshot("deploy_simple_flat")
    cluster_id = self.fuel_web.get_last_created_cluster()
    os_conn = os_actions.OpenStackActions(
        self.fuel_web.get_nailgun_node_by_name('slave-01')['ip'])
    self.fuel_web.assert_cluster_ready(
        os_conn, smiles_count=6, networks_count=1, timeout=300)
    # Hoist the admin remote per phase: the original opened a brand-new
    # SSH connection for every single call
    remote = self.env.get_admin_remote()
    self.fuel_web.backup_master(remote)
    checkers.backup_check(remote)
    self.fuel_web.update_nodes(
        cluster_id, {'slave-03': ['compute']}, True, False)
    assert_equal(
        3, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
    # Fresh connection for the restore phase
    remote = self.env.get_admin_remote()
    self.fuel_web.restore_master(remote)
    checkers.restore_check_sum(remote)
    self.fuel_web.restore_check_nailgun_api(remote)
    checkers.iptables_check(remote)
    assert_equal(
        2, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
    self.fuel_web.update_nodes(
        cluster_id, {'slave-03': ['compute']}, True, False)
    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id)
    self.env.make_snapshot("simple_backup_restore")
def backup_restore_master_base(self):
    """Backup/restore master node

    Scenario:
        1. Revert snapshot "empty"
        2. Backup master
        3. Check backup
        4. Restore master
        5. Check restore
    """
    self.env.revert_snapshot("empty")
    # Hoist the admin remote: the original opened a brand-new SSH
    # connection for every single call
    remote = self.env.get_admin_remote()
    self.fuel_web.backup_master(remote)
    checkers.backup_check(remote)
    self.fuel_web.restore_master(remote)
    self.fuel_web.restore_check_nailgun_api(remote)
    checkers.restore_check_sum(remote)
    checkers.iptables_check(remote)
def backup_restore_master_base(self):
    """Backup and restore the master node, then verify the result.

    Scenario:
        1. Revert snapshot "empty"
        2. Backup master
        3. Check backup
        4. Restore master
        5. Check restore
        6. Check iptables

    Duration 30m
    """
    self.env.revert_snapshot("empty")
    # A single admin SSH session covers the whole backup/restore cycle
    with self.env.d_env.get_admin_remote() as admin_remote:
        # Create the backup and make sure the archive is sane
        self.fuel_web.backup_master(admin_remote)
        checkers.backup_check(admin_remote)
        # Restore from the freshly created backup and verify state
        self.fuel_web.restore_master(admin_remote)
        self.fuel_web.restore_check_nailgun_api(admin_remote)
        checkers.restore_check_sum(admin_remote)
        checkers.iptables_check(admin_remote)
def create_backup_reset_restore_and_deploy_via_cli(self):
    """Backup/restore master node with cluster in ha mode

    Scenario:
        1. Create env with 1 Controller, 1 Compute, 1 Ceph
        2. Start provisioning and wait for it is finished
        3. Backup master
        4. Reset env
        5. Restore master
        6. Delete env
        7. Create new env via CLI with the same staff
        8. Start provisioning via CLI

    Duration 75m
    """
    self.env.revert_snapshot("ready_with_3_slaves")
    cluster_id = self.fuel_web.create_cluster(
        name=self.__class__.__name__,
        mode=DEPLOYMENT_MODE,
        settings={
            "net_provider": 'neutron',
            "net_segment_type": NEUTRON_SEGMENT_TYPE
        })
    self.fuel_web.update_nodes(
        cluster_id,
        {
            'slave-01': ['controller'],
            'slave-02': ['compute'],
            'slave-03': ['ceph-osd']
        })
    self.fuel_web.provisioning_cluster_wait(cluster_id)
    with self.env.d_env.get_admin_remote() as remote:
        self.fuel_web.backup_master(remote)
        checkers.backup_check(remote)
    self.fuel_web.stop_reset_env_wait(cluster_id)
    self.fuel_web.wait_nodes_get_online_state(
        self.env.d_env.nodes().slaves[:3], timeout=10 * 60)
    with self.env.d_env.get_admin_remote() as remote:
        # Restore must complete within 10 minutes or the test fails
        with RunLimit(seconds=60 * 10,
                      error_message="'dockerctl restore' "
                                    "ran longer than 600 sec"):
            self.fuel_web.restore_master(remote)
        checkers.restore_check_sum(remote)
    number_of_nodes = len(
        self.fuel_web.client.list_cluster_nodes(cluster_id))
    self.fuel_web.client.delete_cluster(cluster_id)
    # After deleting the cluster its nodes must be re-discovered
    try:
        wait((lambda: len(self.fuel_web.client.list_nodes()) ==
              number_of_nodes), timeout=5 * 60)
    except TimeoutError:
        assert_true(
            len(self.fuel_web.client.list_nodes()) == number_of_nodes,
            'Nodes are not discovered in timeout 5 *60')
    cl = CommandLine()
    release_id = self.fuel_web.get_releases_list_for_os(
        release_name=OPENSTACK_RELEASE)[0]
    node_ids = [
        self.fuel_web.get_nailgun_node_by_devops_node(
            self.env.d_env.nodes().slaves[slave_id])['id']
        for slave_id in range(3)
    ]
    with self.env.d_env.get_admin_remote() as remote:
        # Create an environment
        if NEUTRON_SEGMENT_TYPE:
            nst = '--nst={0}'.format(NEUTRON_SEGMENT_TYPE)
        else:
            nst = ''
        cmd = ('fuel env create --name={0} --release={1} '
               '{2} --json'.format(self.__class__.__name__,
                                   release_id, nst))
        env_result = run_on_remote(remote, cmd, jsonify=True)
        cluster_id = env_result['id']
        # Update network parameters
        cl.update_cli_network_configuration(cluster_id, remote)
        # Update SSL configuration
        cl.update_ssl_configuration(cluster_id, remote)
        roles = {
            'controller': node_ids[0],
            'compute': node_ids[1],
            'ceph-osd': node_ids[2]
        }
        for role in roles:
            cmd = (
                'fuel --env-id={0} node set --node {1} --role={2}'.format(
                    cluster_id, roles[role], role))
            remote.execute(cmd)
        # 'node_id' instead of the ambiguous single-letter 'l'
        cmd = (
            'fuel --env-id={0} node --provision --node={1} --json'.format(
                cluster_id,
                ','.join(str(node_id) for node_id in node_ids)))
        logger.info("Started provisioning via CLI")
        task = run_on_remote(remote, cmd, jsonify=True)
        cl.assert_cli_task_success(task, remote, timeout=30 * 60)
        logger.info("Finished provisioning via CLI")
def backup_reinstall_restore(self):
    """Backup, reinstall then restore master node with cluster in ha mode

    Scenario:
        1. Revert snapshot "deploy_neutron_tun_ha_backup_restore"
        2. Backup master
        3. Check backup
        4. Reinstall fuel-master node
        5. Restore master
        6. Check restore
        7. Run OSTF
        8. Add 1 compute
        9. Run network check
        10. Deploy cluster
        11. Run OSTF

    Duration XXm
    """
    self.env.revert_snapshot("deploy_neutron_tun_ha_backup_restore")
    cluster_id = self.fuel_web.get_last_created_cluster()
    with self.env.d_env.get_admin_remote() as remote:
        self.fuel_web.backup_master(remote)
        checkers.backup_check(remote)
        backup = checkers.find_backup(self.ssh_manager.admin_ip).strip()
        # Keep a local copy: reinstalling the master wipes the node
        local_backup = os.path.join(
            settings.LOGS_DIR, os.path.basename(backup))
        remote.download(backup, local_backup)
        assert_true(os.path.exists(local_backup),
                    "Backup file wasn't downloaded!")
    self.env.reinstall_master_node()
    # Push the saved backup back onto the freshly installed master
    with self.env.d_env.get_admin_remote() as remote:
        remote.execute('mkdir -p {}'.format(os.path.dirname(backup)))
        remote.upload(local_backup, backup)
        assert_true(remote.exists(backup),
                    "Backup file wasn't uploaded!")
    with RunLimit(
            seconds=60 * 10,
            error_message="'dockerctl restore' "
                          "ran longer than 600 sec"):
        self.fuel_web.restore_master(self.ssh_manager.admin_ip)
    checkers.restore_check_sum(self.ssh_manager.admin_ip)
    self.fuel_web.restore_check_nailgun_api()
    checkers.iptables_check(self.ssh_manager.admin_ip)
    assert_equal(
        5, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
    self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[5:6],
                             skip_timesync=True)
    self.fuel_web.update_nodes(cluster_id, {'slave-06': ['compute']})
    self.fuel_web.verify_network(cluster_id)
    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.fuel_web.run_ostf(
        cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'])
    self.env.make_snapshot("neutron_tun_ha_backup_restore")
def create_backup_reset_restore_and_deploy_via_cli(self):
    """Backup/restore master node with cluster in ha mode

    Scenario:
        1. Create env with 1 Controller, 1 Compute, 1 Ceph
        2. Start provisioning and wait for it is finished
        3. Backup master
        4. Reset env
        5. Restore master
        6. Delete env
        7. Create new env via CLI with the same staff
        8. Start provisioning via CLI

    Duration 75m
    """
    self.env.revert_snapshot("ready_with_3_slaves")
    cluster_id = self.fuel_web.create_cluster(
        name=self.__class__.__name__,
        mode=DEPLOYMENT_MODE,
        settings={
            "net_provider": 'neutron',
            "net_segment_type": NEUTRON_SEGMENT_TYPE
        }
    )
    self.fuel_web.update_nodes(
        cluster_id,
        {'slave-01': ['controller'],
         'slave-02': ['compute'],
         'slave-03': ['ceph-osd']}
    )
    self.fuel_web.provisioning_cluster_wait(cluster_id)
    with self.env.d_env.get_admin_remote() as remote:
        self.fuel_web.backup_master(remote)
        checkers.backup_check(remote)
    self.fuel_web.stop_reset_env_wait(cluster_id)
    self.fuel_web.wait_nodes_get_online_state(
        self.env.d_env.nodes().slaves[:3], timeout=10 * 60)
    # Restore must complete within 10 minutes or the test fails
    with RunLimit(
            seconds=60 * 10,
            error_message="'dockerctl restore' "
                          "ran longer than 600 sec"):
        self.fuel_web.restore_master(self.ssh_manager.admin_ip)
    checkers.restore_check_sum(self.ssh_manager.admin_ip)
    number_of_nodes = len(self.fuel_web.client.list_cluster_nodes(
        cluster_id))
    self.fuel_web.client.delete_cluster(cluster_id)
    # After deleting the cluster its nodes must be re-discovered
    try:
        wait((lambda: len(
            self.fuel_web.client.list_nodes()) == number_of_nodes),
            timeout=5 * 60)
    except TimeoutError:
        assert_true(
            len(self.fuel_web.client.list_nodes()) == number_of_nodes,
            'Nodes are not discovered in timeout 5 *60')
    cl = CommandLine()
    release_id = self.fuel_web.get_releases_list_for_os(
        release_name=OPENSTACK_RELEASE)[0]
    node_ids = [self.fuel_web.get_nailgun_node_by_devops_node(
        self.env.d_env.nodes().slaves[slave_id])['id']
        for slave_id in range(3)]
    # Create an environment
    if NEUTRON_SEGMENT_TYPE:
        nst = '--nst={0}'.format(NEUTRON_SEGMENT_TYPE)
    else:
        nst = ''
    cmd = ('fuel env create --name={0} --release={1} '
           '{2} --json'.format(self.__class__.__name__, release_id, nst))
    env_result = self.ssh_manager.execute_on_remote(
        ip=self.ssh_manager.admin_ip,
        cmd=cmd, jsonify=True
    )['stdout_json']
    cluster_id = env_result['id']
    # Update network parameters
    cl.update_cli_network_configuration(cluster_id)
    # Update SSL configuration
    cl.update_ssl_configuration(cluster_id)
    roles = {'controller': node_ids[0],
             'compute': node_ids[1],
             'ceph-osd': node_ids[2]}
    for role in roles:
        cmd = ('fuel --env-id={0} node set --node {1} --role={2}'
               .format(cluster_id, roles[role], role))
        self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip,
            cmd=cmd
        )
    # 'node_id' instead of the ambiguous single-letter 'l'
    cmd = (
        'fuel --env-id={0} node --provision --node={1} --json'.format(
            cluster_id, ','.join(str(node_id) for node_id in node_ids))
    )
    logger.info("Started provisioning via CLI")
    task = self.ssh_manager.execute_on_remote(
        ip=self.ssh_manager.admin_ip,
        cmd=cmd, jsonify=True
    )['stdout_json']
    cl.assert_cli_task_success(task, timeout=30 * 60)
    logger.info("Finished provisioning via CLI")
def neutron_tun_ha_backup_restore(self):
    """Backup/restore master node with cluster in ha mode

    Scenario:
        1. Revert snapshot "deploy_neutron_tun_ha_backup_restore"
        2. Backup master
        3. Check backup
        4. Run OSTF
        5. Add 1 node with compute role
        6. Restore master
        7. Check restore
        8. Run OSTF

    Duration 50m
    """
    self.env.revert_snapshot("deploy_neutron_tun_ha_backup_restore")
    cluster_id = self.fuel_web.get_last_created_cluster()
    cluster = self.fuel_web.client.get_cluster(cluster_id)
    assert_equal(str(cluster['net_provider']), 'neutron')
    os_conn = os_actions.OpenStackActions(
        self.fuel_web.get_public_vip(cluster_id),
        'haTun', 'haTun', 'haTun')
    self.fuel_web.check_fixed_network_cidr(cluster_id, os_conn)
    with self.env.d_env.get_admin_remote() as remote:
        self.fuel_web.backup_master(remote)
        checkers.backup_check(remote)
    # Add a pending node after the backup; it must vanish after restore
    self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[5:6])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-06': ['compute']}, True, False)
    assert_equal(
        6, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
    with RunLimit(
            seconds=60 * 10,
            error_message="'dockerctl restore' "
                          "ran longer than 600 sec"):
        self.fuel_web.restore_master(self.ssh_manager.admin_ip)
    checkers.restore_check_sum(self.ssh_manager.admin_ip)
    self.fuel_web.restore_check_nailgun_api()
    checkers.iptables_check(self.ssh_manager.admin_ip)
    # The node added after the backup must be gone
    assert_equal(
        5, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
    self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[5:6])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-06': ['compute']}, True, False)
    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.fuel_web.run_ostf(
        cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'])
    self.env.make_snapshot("neutron_tun_ha_backup_restore")
def backup_restore_neutron_tun_ha(self):
    """Backup/restore master node with cluster in ha mode

    Scenario:
        1. Revert snapshot "deploy_neutron_tun_ha"
        2. Backup master
        3. Check backup
        4. Run OSTF
        5. Add 1 node with compute role
        6. Restore master
        7. Check restore
        8. Run OSTF

    Duration 50m
    """
    self.env.revert_snapshot("deploy_neutron_tun_ha")
    cluster_id = self.fuel_web.get_last_created_cluster()
    cluster = self.fuel_web.client.get_cluster(cluster_id)
    assert_equal(str(cluster['net_provider']), 'neutron')
    os_conn = os_actions.OpenStackActions(
        self.fuel_web.get_public_vip(cluster_id))
    self.fuel_web.check_fixed_network_cidr(cluster_id, os_conn)
    # Share one remote per phase instead of opening a separate SSH
    # connection (and context manager) for every single call
    with self.env.d_env.get_admin_remote() as remote:
        self.fuel_web.backup_master(remote)
        checkers.backup_check(remote)
    # Add a pending node after the backup; it must vanish after restore
    self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[5:6])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-06': ['compute']}, True, False)
    assert_equal(
        6, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
    with self.env.d_env.get_admin_remote() as remote:
        self.fuel_web.restore_master(remote)
        checkers.restore_check_sum(remote)
        self.fuel_web.restore_check_nailgun_api(remote)
        checkers.iptables_check(remote)
    assert_equal(
        5, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
    self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[5:6])
    self.fuel_web.update_nodes(
        cluster_id, {'slave-06': ['compute']}, True, False)
    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.fuel_web.run_ostf(
        cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'])
    self.env.make_snapshot("backup_restore_neutron_tun_ha")
def backup_reinstall_restore(self):
    """Backup, reinstall then restore master node with cluster in ha mode

    Scenario:
        1. Revert snapshot "deploy_neutron_tun_ha_backup_restore"
        2. Backup master
        3. Check backup
        4. Reinstall fuel-master node
        5. Restore master
        6. Check restore
        7. Run OSTF
        8. Add 1 compute
        9. Run network check
        10. Deploy cluster
        11. Run OSTF

    Duration XXm
    """
    self.env.revert_snapshot("deploy_neutron_tun_ha_backup_restore")
    cluster_id = self.fuel_web.get_last_created_cluster()
    with self.env.d_env.get_admin_remote() as remote:
        self.fuel_web.backup_master(remote)
        checkers.backup_check(remote)
        backup = checkers.find_backup(remote).strip()
        # Keep a local copy: reinstalling the master wipes the node
        local_backup = os.path.join(settings.LOGS_DIR,
                                    os.path.basename(backup))
        remote.download(backup, local_backup)
        assert_true(os.path.exists(local_backup),
                    "Backup file wasn't downloaded!")
    self.env.reinstall_master_node()
    # Push the saved backup back onto the freshly installed master
    with self.env.d_env.get_admin_remote() as remote:
        remote.execute('mkdir -p {}'.format(os.path.dirname(backup)))
        remote.upload(local_backup, backup)
        assert_true(remote.exists(backup),
                    "Backup file wasn't uploaded!")
        with RunLimit(seconds=60 * 10,
                      error_message="'dockerctl restore' "
                                    "ran longer than 600 sec"):
            self.fuel_web.restore_master(remote)
        checkers.restore_check_sum(remote)
        self.fuel_web.restore_check_nailgun_api()
        checkers.iptables_check(remote)
    assert_equal(
        5, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
    self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[5:6],
                             skip_timesync=True)
    self.fuel_web.update_nodes(cluster_id, {'slave-06': ['compute']})
    self.fuel_web.verify_network(cluster_id)
    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.fuel_web.run_ostf(
        cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'])
    self.env.make_snapshot("neutron_tun_ha_backup_restore")