def deploy_neutron_tun_ha_nodegroups(self):
    """Deploy HA environment with NeutronVXLAN and 2 nodegroups

    Scenario:
        1. Revert snapshot with ready master node
        2. Bootstrap slaves from default nodegroup
        3. Create cluster with Neutron VXLAN and custom nodegroups
        4. Remove 2nd custom nodegroup which is added automatically
        5. Bootstrap slave nodes from custom nodegroup
        6. Download network configuration
        7. Update network.json with customized ip ranges
        8. Put new json on master node and update network data
        9. Verify that new IP ranges are applied for network config
        10. Add 3 controller nodes from default nodegroup
        11. Add 2 compute nodes from custom nodegroup
        12. Deploy cluster
        13. Run network verification
        14. Verify that excluded ip is not used for nodes or VIP
        15. Run health checks (OSTF)

    Duration 110m
    Snapshot deploy_neutron_tun_ha_nodegroups
    """
    # Multi-rack scenario only makes sense when multiple networks are
    # configured for the environment.
    if not MULTIPLE_NETWORKS:
        raise SkipTest()
    self.show_step(1, initialize=True)
    self.env.revert_snapshot("ready")
    self.show_step(2)
    # First three slaves belong to the default nodegroup.
    self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[0:3])
    self.show_step(3)
    cluster_id = self.fuel_web.create_cluster(
        name=self.__class__.__name__,
        mode=DEPLOYMENT_MODE_HA,
        settings={
            "net_provider": 'neutron',
            "net_segment_type": NEUTRON_SEGMENT['tun'],
            'tenant': 'haVxlan',
            'user': '******',
            'password': '******'
        }
    )
    self.show_step(4)
    # Keep the full (all-nodegroups) network config around for later use.
    self.netconf_all_groups = self.fuel_web.client.get_networks(cluster_id)
    custom_group2 = self.fuel_web.get_nodegroup(
        cluster_id, name=NODEGROUPS[2]['name'])
    # Deleting a nodegroup while 'update_dnsmasq' is still running would
    # race with it, so wait for that task to finish first.
    wait(lambda: not self.is_update_dnsmasq_running(
        self.fuel_web.client.get_tasks()), timeout=60,
        timeout_msg="Timeout exceeded while waiting for task "
                    "'update_dnsmasq' is finished!")
    self.fuel_web.client.delete_nodegroup(custom_group2['id'])
    self.show_step(5)
    # Slaves 4-5 boot from the remaining custom nodegroup's network.
    self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:5])
    self.show_step(6)
    # Dump the current network configuration to /var/log/ on the master
    # node via the Fuel CLI.
    with self.env.d_env.get_admin_remote() as remote:
        check_get_network_data_over_cli(remote, cluster_id, '/var/log/')
    management_ranges_default = []
    management_ranges_custom = []
    storage_ranges_default = []
    storage_ranges_custom = []
    default_group_id = self.fuel_web.get_nodegroup(cluster_id)['id']
    custom_group_id = self.fuel_web.get_nodegroup(
        cluster_id, name=NODEGROUPS[1]['name'])['id']
    self.show_step(7)
    with self.env.d_env.get_admin_remote() as remote:
        # NOTE(review): filename hardcodes cluster id 1 — presumably this
        # is always the first cluster in the test env; confirm that the
        # name matches 'network_{0}.json'.format(cluster_id).
        current_net = json.loads(remote.open(
            '/var/log/network_1.json').read())
        # Get storage ranges for default and custom groups
        storage_ranges_default.append(self.get_modified_ranges(
            current_net, 'storage', group_id=default_group_id))
        storage_ranges_custom.append(self.get_modified_ranges(
            current_net, 'storage', group_id=custom_group_id))
        management_ranges_default.append(self.get_modified_ranges(
            current_net, 'management', group_id=default_group_id))
        management_ranges_custom.append(self.get_modified_ranges(
            current_net, 'management', group_id=custom_group_id))
        update_data = {
            default_group_id: {'storage': storage_ranges_default,
                               'management': management_ranges_default},
            custom_group_id: {'storage': storage_ranges_custom,
                              'management': management_ranges_custom}}
        updated_network = self.update_network_ranges(
            current_net, update_data)
        logger.debug(
            'Plan to update ranges for default group to {0} for storage '
            'and {1} for management and for custom group storage {2},'
            ' management {3}'.format(storage_ranges_default,
                                     management_ranges_default,
                                     storage_ranges_custom,
                                     management_ranges_custom))
        # need to push to remote
        self.show_step(8)
        # Upload the modified JSON and apply it through the Fuel CLI.
        utils.put_json_on_remote_from_dict(
            remote, updated_network, cluster_id)
        check_update_network_data_over_cli(remote, cluster_id,
                                           '/var/log/')
    self.show_step(9)
    # Re-download the config and check the new ranges actually landed.
    with self.env.d_env.get_admin_remote() as remote:
        check_get_network_data_over_cli(remote, cluster_id, '/var/log/')
        latest_net = json.loads(remote.open(
            '/var/log/network_1.json').read())
        updated_storage_default = self.get_ranges(latest_net, 'storage',
                                                  default_group_id)
        updated_storage_custom = self.get_ranges(latest_net, 'storage',
                                                 custom_group_id)
        updated_mgmt_default = self.get_ranges(latest_net, 'management',
                                               default_group_id)
        updated_mgmt_custom = self.get_ranges(latest_net, 'management',
                                              custom_group_id)
    asserts.assert_equal(
        updated_storage_default, storage_ranges_default,
        'Looks like storage range for default nodegroup '
        'was not updated. Expected {0}, Actual: {1}'.format(
            storage_ranges_default, updated_storage_default))
    asserts.assert_equal(
        updated_storage_custom, storage_ranges_custom,
        'Looks like storage range for custom nodegroup '
        'was not updated. Expected {0}, Actual: {1}'.format(
            storage_ranges_custom, updated_storage_custom))
    asserts.assert_equal(
        updated_mgmt_default, management_ranges_default,
        'Looks like management range for default nodegroup was '
        'not updated. Expected {0}, Actual: {1}'.format(
            management_ranges_default, updated_mgmt_default))
    asserts.assert_equal(
        updated_mgmt_custom, management_ranges_custom,
        'Looks like management range for custom nodegroup was '
        'not updated. Expected {0}, Actual: {1}'.format(
            management_ranges_custom, updated_mgmt_custom))
    self.show_step(10)
    self.show_step(11)
    # Controllers go to the default rack, computes to the custom one.
    nodegroup_default = NODEGROUPS[0]['name']
    nodegroup_custom1 = NODEGROUPS[1]['name']
    self.fuel_web.update_nodes(
        cluster_id,
        {
            'slave-01': [['controller'], nodegroup_default],
            'slave-02': [['controller'], nodegroup_default],
            'slave-03': [['controller'], nodegroup_default],
            'slave-04': [['compute', 'cinder'], nodegroup_custom1],
            'slave-05': [['compute', 'cinder'], nodegroup_custom1],
        }
    )
    self.show_step(12)
    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.show_step(13)
    self.fuel_web.verify_network(cluster_id)
    self.show_step(14)
    # Every deployed node's storage/management IP must fall inside the
    # customized range of its own nodegroup.
    net_data_default_group = [
        data['network_data'] for data
        in self.fuel_web.client.list_cluster_nodes(cluster_id)
        if data['group_id'] == default_group_id]
    for net_node in net_data_default_group:
        for net in net_node:
            if 'storage' in net['name']:
                asserts.assert_true(
                    self.is_ip_in_range(
                        net['ip'].split('/')[0],
                        updated_storage_default[0][0],
                        updated_storage_default[0][-1]))
            if 'management' in net['name']:
                asserts.assert_true(
                    self.is_ip_in_range(
                        net['ip'].split('/')[0],
                        updated_mgmt_default[0][0],
                        updated_mgmt_default[0][-1]))
    net_data_custom_group = [
        data['network_data'] for data
        in self.fuel_web.client.list_cluster_nodes(cluster_id)
        if data['group_id'] == custom_group_id]
    for net_node in net_data_custom_group:
        for net in net_node:
            if 'storage' in net['name']:
                asserts.assert_true(
                    self.is_ip_in_range(
                        net['ip'].split('/')[0],
                        updated_storage_custom[0][0],
                        updated_storage_custom[0][-1]))
            if 'management' in net['name']:
                asserts.assert_true(
                    self.is_ip_in_range(
                        net['ip'].split('/')[0],
                        updated_mgmt_custom[0][0],
                        updated_mgmt_custom[0][-1]))
    # VIPs are allocated from the default nodegroup's management range.
    mgmt_vrouter_vip = self.fuel_web.get_management_vrouter_vip(
        cluster_id)
    logger.debug('Management vrouter vips is {0}'.format(
        mgmt_vrouter_vip))
    mgmt_vip = self.fuel_web.get_mgmt_vip(cluster_id)
    logger.debug('Management vips is {0}'.format(mgmt_vip))
    # check for defaults
    asserts.assert_true(self.is_ip_in_range(mgmt_vrouter_vip.split('/')[0],
                                            updated_mgmt_default[0][0],
                                            updated_mgmt_default[0][-1]))
    asserts.assert_true(self.is_ip_in_range(mgmt_vip.split('/')[0],
                                            updated_mgmt_default[0][0],
                                            updated_mgmt_default[0][-1]))
    self.show_step(15)
    self.fuel_web.run_ostf(cluster_id=cluster_id)
    self.env.make_snapshot("deploy_neutron_tun_ha_nodegroups",
                           is_make=True)
def deploy_neutron_tun_ha_nodegroups(self):
    """Deploy HA environment with NeutronVXLAN and 2 nodegroups

    NOTE(review): this looks like a second definition of the same test
    method (see the earlier variant in this file) — if both live in one
    class, this one silently shadows the other; confirm intent.

    Scenario:
        1. Revert snapshot with ready master node
        2. Bootstrap slaves from default nodegroup
        3. Create cluster with Neutron VXLAN and custom nodegroups
        4. Remove 2nd custom nodegroup which is added automatically
        5. Bootstrap slave nodes from custom nodegroup
        6. Download network configuration
        7. Update network.json with customized ip ranges
        8. Put new json on master node and update network data
        9. Verify that new IP ranges are applied for network config
        10. Add 3 controller nodes from default nodegroup
        11. Add 2 compute nodes from custom nodegroup
        12. Deploy cluster
        13. Run network verification
        14. Verify that excluded ip is not used for nodes or VIP
        15. Run health checks (OSTF)

    Duration 110m
    Snapshot deploy_neutron_tun_ha_nodegroups
    """
    # Multi-rack scenario only makes sense when multiple networks are
    # configured for the environment.
    if not MULTIPLE_NETWORKS:
        raise SkipTest("MULTIPLE_NETWORKS not enabled")
    self.show_step(1, initialize=True)
    self.env.revert_snapshot("ready")
    self.show_step(2)
    # First three slaves belong to the default nodegroup.
    self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[0:3])
    self.show_step(3)
    cluster_id = self.fuel_web.create_cluster(
        name=self.__class__.__name__,
        mode=DEPLOYMENT_MODE_HA,
        settings={
            "net_provider": "neutron",
            "net_segment_type": NEUTRON_SEGMENT["tun"],
            "tenant": "haVxlan",
            "user": "******",
            "password": "******",
        },
    )
    self.show_step(4)
    # Keep the full (all-nodegroups) network config around for later use.
    self.netconf_all_groups = self.fuel_web.client.get_networks(cluster_id)
    custom_group2 = self.fuel_web.get_nodegroup(
        cluster_id, name=NODEGROUPS[2]["name"])
    # Deleting a nodegroup while 'update_dnsmasq' is still running would
    # race with it, so wait for that task to finish first.
    wait(
        lambda: not self.is_update_dnsmasq_running(
            self.fuel_web.client.get_tasks()),
        timeout=60,
        timeout_msg="Timeout exceeded while waiting for task "
                    "'update_dnsmasq' is finished!",
    )
    self.fuel_web.client.delete_nodegroup(custom_group2["id"])
    self.show_step(5)
    # Slaves 4-5 boot from the remaining custom nodegroup's network.
    self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:5])
    self.show_step(6)
    # Dump the current network configuration to /var/log/ on the master
    # node via the Fuel CLI.
    check_get_network_data_over_cli(
        self.ssh_manager.admin_ip, cluster_id, "/var/log/")
    management_ranges_default = []
    management_ranges_custom = []
    storage_ranges_default = []
    storage_ranges_custom = []
    default_group_id = self.fuel_web.get_nodegroup(cluster_id)["id"]
    custom_group_id = self.fuel_web.get_nodegroup(
        cluster_id, name=NODEGROUPS[1]["name"])["id"]
    self.show_step(7)
    with self.env.d_env.get_admin_remote() as remote:
        # NOTE(review): read path hardcodes cluster id 1 while the write
        # below uses network_{cluster_id}.json — presumably cluster_id is
        # always 1 here; confirm.
        current_net = json.loads(
            remote.open("/var/log/network_1.json").read())
        # Get storage ranges for default and custom groups
        storage_ranges_default.append(self.get_modified_ranges(
            current_net, "storage", group_id=default_group_id))
        storage_ranges_custom.append(self.get_modified_ranges(
            current_net, "storage", group_id=custom_group_id))
        management_ranges_default.append(self.get_modified_ranges(
            current_net, "management", group_id=default_group_id))
        management_ranges_custom.append(self.get_modified_ranges(
            current_net, "management", group_id=custom_group_id))
        update_data = {
            default_group_id: {"storage": storage_ranges_default,
                               "management": management_ranges_default},
            custom_group_id: {"storage": storage_ranges_custom,
                              "management": management_ranges_custom},
        }
        updated_network = self.update_network_ranges(
            current_net, update_data)
        logger.debug(
            "Plan to update ranges for default group to {0} for storage "
            "and {1} for management and for custom group storage {2},"
            " management {3}".format(
                storage_ranges_default, management_ranges_default,
                storage_ranges_custom, management_ranges_custom
            )
        )
        # need to push to remote
        self.show_step(8)
        # Write the modified JSON on the master node, then apply it
        # through the Fuel CLI.
        with remote.open("/var/log/network_{0}.json".format(cluster_id),
                         mode="w") as file_obj:
            json.dump(updated_network, file_obj)
        check_update_network_data_over_cli(
            self.ssh_manager.admin_ip, cluster_id, "/var/log/")
    self.show_step(9)
    # Re-download the config and check the new ranges actually landed.
    with self.env.d_env.get_admin_remote() as remote:
        check_get_network_data_over_cli(
            self.ssh_manager.admin_ip, cluster_id, "/var/log/")
        latest_net = json.loads(
            remote.open("/var/log/network_1.json").read())
        updated_storage_default = self.get_ranges(
            latest_net, "storage", default_group_id)
        updated_storage_custom = self.get_ranges(
            latest_net, "storage", custom_group_id)
        updated_mgmt_default = self.get_ranges(
            latest_net, "management", default_group_id)
        updated_mgmt_custom = self.get_ranges(
            latest_net, "management", custom_group_id)
    asserts.assert_equal(
        updated_storage_default,
        storage_ranges_default,
        "Looks like storage range for default nodegroup "
        "was not updated. Expected {0}, Actual: {1}".format(
            storage_ranges_default, updated_storage_default),
    )
    asserts.assert_equal(
        updated_storage_custom,
        storage_ranges_custom,
        "Looks like storage range for custom nodegroup "
        "was not updated. Expected {0}, Actual: {1}".format(
            storage_ranges_custom, updated_storage_custom),
    )
    asserts.assert_equal(
        updated_mgmt_default,
        management_ranges_default,
        "Looks like management range for default nodegroup was "
        "not updated. Expected {0}, Actual: {1}".format(
            management_ranges_default, updated_mgmt_default),
    )
    asserts.assert_equal(
        updated_mgmt_custom,
        management_ranges_custom,
        "Looks like management range for custom nodegroup was "
        "not updated. Expected {0}, Actual: {1}".format(
            management_ranges_custom, updated_mgmt_custom),
    )
    self.show_step(10)
    self.show_step(11)
    # Controllers go to the default rack, computes to the custom one.
    nodegroup_default = NODEGROUPS[0]["name"]
    nodegroup_custom1 = NODEGROUPS[1]["name"]
    self.fuel_web.update_nodes(
        cluster_id,
        {
            "slave-01": [["controller"], nodegroup_default],
            "slave-02": [["controller"], nodegroup_default],
            "slave-03": [["controller"], nodegroup_default],
            "slave-04": [["compute", "cinder"], nodegroup_custom1],
            "slave-05": [["compute", "cinder"], nodegroup_custom1],
        },
    )
    self.show_step(12)
    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.show_step(13)
    self.fuel_web.verify_network(cluster_id)
    self.show_step(14)
    # Every deployed node's storage/management IP must fall inside the
    # customized range of its own nodegroup.
    net_data_default_group = [
        data["network_data"]
        for data in self.fuel_web.client.list_cluster_nodes(cluster_id)
        if data["group_id"] == default_group_id
    ]
    for net_node in net_data_default_group:
        for net in net_node:
            if "storage" in net["name"]:
                asserts.assert_true(
                    self.is_ip_in_range(
                        net["ip"].split("/")[0],
                        updated_storage_default[0][0],
                        updated_storage_default[0][-1]
                    )
                )
            if "management" in net["name"]:
                asserts.assert_true(
                    self.is_ip_in_range(
                        net["ip"].split("/")[0],
                        updated_mgmt_default[0][0],
                        updated_mgmt_default[0][-1]
                    )
                )
    net_data_custom_group = [
        data["network_data"]
        for data in self.fuel_web.client.list_cluster_nodes(cluster_id)
        if data["group_id"] == custom_group_id
    ]
    for net_node in net_data_custom_group:
        for net in net_node:
            if "storage" in net["name"]:
                asserts.assert_true(
                    self.is_ip_in_range(
                        net["ip"].split("/")[0],
                        updated_storage_custom[0][0],
                        updated_storage_custom[0][-1]
                    )
                )
            if "management" in net["name"]:
                asserts.assert_true(
                    self.is_ip_in_range(
                        net["ip"].split("/")[0],
                        updated_mgmt_custom[0][0],
                        updated_mgmt_custom[0][-1]
                    )
                )
    # VIPs are allocated from the default nodegroup's management range.
    mgmt_vrouter_vip = self.fuel_web.get_management_vrouter_vip(cluster_id)
    logger.debug("Management vrouter vips is {0}".format(mgmt_vrouter_vip))
    mgmt_vip = self.fuel_web.get_mgmt_vip(cluster_id)
    logger.debug("Management vips is {0}".format(mgmt_vip))
    # check for defaults
    asserts.assert_true(
        self.is_ip_in_range(
            mgmt_vrouter_vip.split("/")[0],
            updated_mgmt_default[0][0],
            updated_mgmt_default[0][-1])
    )
    asserts.assert_true(
        self.is_ip_in_range(
            mgmt_vip.split("/")[0],
            updated_mgmt_default[0][0],
            updated_mgmt_default[0][-1])
    )
    self.show_step(15)
    self.fuel_web.run_ostf(cluster_id=cluster_id)
    self.env.make_snapshot("deploy_neutron_tun_ha_nodegroups",
                           is_make=True)
def update_network_config(self, cluster_id):
    """Update network configuration.

    Downloads the cluster's network JSON via the Fuel CLI, customizes the
    storage/management IP ranges for the default and first custom
    nodegroups, uploads the modified JSON back, then re-downloads the
    config and asserts the new ranges were applied.

    :param cluster_id: id of the cluster whose networks are updated.
    :return: tuple of (updated_storage_default, updated_storage_custom,
        updated_mgmt_default, updated_mgmt_custom) — the verified ranges
        as returned by self.get_ranges().
    """
    # Dump the current network configuration to /var/log/ on the master
    # node via the Fuel CLI.
    check_get_network_data_over_cli(self.ssh_manager.admin_ip,
                                    cluster_id, '/var/log/')
    management_ranges_default = []
    management_ranges_custom = []
    storage_ranges_default = []
    storage_ranges_custom = []
    default_group_id = self.fuel_web.get_nodegroup(cluster_id)['id']
    custom_group_id = self.fuel_web.get_nodegroup(
        cluster_id, name=NODEGROUPS[1]['name'])['id']
    self.show_step(9)
    with self.env.d_env.get_admin_remote() as remote:
        # NOTE(review): read path hardcodes cluster id 1 while the write
        # below uses network_{cluster_id}.json — presumably cluster_id is
        # always 1 in these scenarios; confirm.
        current_net = json.loads(
            remote.open('/var/log/network_1.json').read())
        # Get storage ranges for default and custom groups
        storage_ranges_default.append(
            self.get_modified_ranges(current_net, 'storage',
                                     group_id=default_group_id))
        storage_ranges_custom.append(
            self.get_modified_ranges(current_net, 'storage',
                                     group_id=custom_group_id))
        management_ranges_default.append(
            self.get_modified_ranges(current_net, 'management',
                                     group_id=default_group_id))
        management_ranges_custom.append(
            self.get_modified_ranges(current_net, 'management',
                                     group_id=custom_group_id))
        update_data = {
            default_group_id: {
                'storage': storage_ranges_default,
                'management': management_ranges_default
            },
            custom_group_id: {
                'storage': storage_ranges_custom,
                'management': management_ranges_custom
            }
        }
        updated_network = self.update_network_ranges(
            current_net, update_data)
        logger.debug(
            'Plan to update ranges for default group to {0} for storage '
            'and {1} for management and for custom group storage {2},'
            ' management {3}'.format(storage_ranges_default,
                                     management_ranges_default,
                                     storage_ranges_custom,
                                     management_ranges_custom))
        self.show_step(10)
        # Write the modified JSON on the master node, then apply it
        # through the Fuel CLI.
        with remote.open('/var/log/network_{0}.json'.format(cluster_id),
                         mode='w') as file_obj:
            json.dump(updated_network, file_obj)
        check_update_network_data_over_cli(self.ssh_manager.admin_ip,
                                           cluster_id, '/var/log/')
        self.show_step(11)
        # Re-download the config and check the new ranges actually landed.
        check_get_network_data_over_cli(self.ssh_manager.admin_ip,
                                        cluster_id, '/var/log/')
        latest_net = json.loads(
            remote.open('/var/log/network_1.json').read())
        updated_storage_default = self.get_ranges(latest_net, 'storage',
                                                  default_group_id)
        updated_storage_custom = self.get_ranges(latest_net, 'storage',
                                                 custom_group_id)
        updated_mgmt_default = self.get_ranges(latest_net, 'management',
                                               default_group_id)
        updated_mgmt_custom = self.get_ranges(latest_net, 'management',
                                              custom_group_id)
    asserts.assert_equal(
        updated_storage_default, storage_ranges_default,
        'Looks like storage range for default nodegroup '
        'was not updated. Expected {0}, Actual: {1}'.format(
            storage_ranges_default, updated_storage_default))
    asserts.assert_equal(
        updated_storage_custom, storage_ranges_custom,
        'Looks like storage range for custom nodegroup '
        'was not updated. Expected {0}, Actual: {1}'.format(
            storage_ranges_custom, updated_storage_custom))
    asserts.assert_equal(
        updated_mgmt_default, management_ranges_default,
        'Looks like management range for default nodegroup was '
        'not updated. Expected {0}, Actual: {1}'.format(
            management_ranges_default, updated_mgmt_default))
    asserts.assert_equal(
        updated_mgmt_custom, management_ranges_custom,
        'Looks like management range for custom nodegroup was '
        'not updated. Expected {0}, Actual: {1}'.format(
            management_ranges_custom, updated_mgmt_custom))
    return updated_storage_default, updated_storage_custom, \
        updated_mgmt_default, updated_mgmt_custom