def check_interfaces_config_after_reboot(self):
    """Verify that nodes' network interface settings survive a warm reboot.

    Snapshots the interface configuration of every node in
    ``self.cluster_id``, warm-restarts all nodes, re-collects the
    configuration and fails via ``assert_false`` if anything changed.
    Per-interface differences are logged before the final assertion so
    the logs pinpoint what drifted.
    """
    network_settings = dict()
    # Regexes of interfaces that are expected to differ across reboots
    # (Neutron taps/ports, vrouter devices, haproxy-namespace veths, ...)
    # and therefore must not trigger a failure.
    skip_interfaces = {
        r'^pub-base$', r'^vr_pub-base$', r'^vr-base$', r'^mgmt-base$',
        r'^vr-host-base$', r'^mgmt-conntrd$', r'^hapr-host$',
        r'^(tap|qr-|qg-|p_).*$', r'^v_vrouter.*$',
        r'^v_(management|public)$'}
    nodes = self.fuel_web.client.list_cluster_nodes(self.cluster_id)
    # Snapshot interface configuration before the reboot.
    for node in nodes:
        with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
            network_settings[node['hostname']] = \
                get_net_settings(remote, skip_interfaces)

    self.fuel_web.warm_restart_nodes(
        self.fuel_web.get_devops_nodes_by_nailgun_nodes(nodes))

    network_settings_changed = False
    # Compare the post-reboot configuration against the saved snapshot.
    for node in nodes:
        with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
            saved_settings = network_settings[node['hostname']]
            actual_settings = get_net_settings(remote, skip_interfaces)
            # Idiomatic '!=' instead of 'not ... == ...' (PEP 8).
            if saved_settings != actual_settings:
                network_settings_changed = True
                logger.error('Network settings were changed after reboot '
                             'on node {0}! '.format(node['hostname']))
                logger.debug('Network settings before the reboot of slave '
                             '{0}: {1}'.format(node['hostname'],
                                               saved_settings))
                logger.debug('Network settings after the reboot of slave '
                             '{0}: {1}'.format(node['hostname'],
                                               actual_settings))

                # Log each vanished or modified interface individually.
                for iface in saved_settings:
                    if iface not in actual_settings:
                        logger.error("Interface '{0}' doesn't exist after "
                                     "reboot of '{1}'!".format(
                                         iface, node['hostname']))
                        continue
                    if saved_settings[iface] != actual_settings[iface]:
                        logger.error("Interface '{0}' settings "
                                     "were changed after reboot "
                                     "of '{1}': was {2}, now "
                                     "{3}.".format(
                                         iface, node['hostname'],
                                         saved_settings[iface],
                                         actual_settings[iface]))

    assert_false(
        network_settings_changed,
        "Network settings were changed after environment nodes "
        "reboot! Please check logs for details!")
def check_interfaces_config_after_reboot(self, cluster_id):
    """Verify that nodes' network interface settings survive a warm reboot.

    :param cluster_id: id of the cluster whose nodes are checked.

    Snapshots the interface configuration of every cluster node,
    warm-restarts all nodes, re-collects the configuration and fails via
    ``assert_false`` if anything changed.  Per-interface differences are
    logged before the final assertion so the logs pinpoint what drifted.
    """
    network_settings = dict()
    # Set literal instead of set([...]) (flake8-comprehensions C405).
    # Regexes of interfaces that are expected to differ across reboots
    # (Neutron taps/ports, vrouter devices, haproxy-namespace veths, ...).
    skip_interfaces = {
        r'^pub-base$', r'^vr_pub-base$', r'^vr-base$', r'^mgmt-base$',
        r'^vr-host-base$', r'^mgmt-conntrd$', r'^hapr-host$',
        r'^(tap|qr-|qg-|p_).*$', r'^v_vrouter.*$',
        r'^v_(management|public)$'}
    nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
    # Snapshot interface configuration before the reboot.
    for node in nodes:
        with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
            network_settings[node['hostname']] = \
                get_net_settings(remote, skip_interfaces)

    self.fuel_web.warm_restart_nodes(
        self.fuel_web.get_devops_nodes_by_nailgun_nodes(nodes))

    network_settings_changed = False
    # Compare the post-reboot configuration against the saved snapshot.
    for node in nodes:
        with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
            saved_settings = network_settings[node['hostname']]
            actual_settings = get_net_settings(remote, skip_interfaces)
            # Idiomatic '!=' instead of 'not ... == ...' (PEP 8).
            if saved_settings != actual_settings:
                network_settings_changed = True
                logger.error('Network settings were changed after reboot '
                             'on node {0}! '.format(node['hostname']))
                logger.debug('Network settings before the reboot of slave '
                             '{0}: {1}'.format(node['hostname'],
                                               saved_settings))
                logger.debug('Network settings after the reboot of slave '
                             '{0}: {1}'.format(node['hostname'],
                                               actual_settings))

                # Log each vanished or modified interface individually.
                for iface in saved_settings:
                    if iface not in actual_settings:
                        logger.error("Interface '{0}' doesn't exist after "
                                     "reboot of '{1}'!".format(
                                         iface, node['hostname']))
                        continue
                    if saved_settings[iface] != actual_settings[iface]:
                        logger.error("Interface '{0}' settings "
                                     "were changed after reboot "
                                     "of '{1}': was {2}, now "
                                     "{3}.".format(
                                         iface, node['hostname'],
                                         saved_settings[iface],
                                         actual_settings[iface]))

    assert_false(
        network_settings_changed,
        "Network settings were changed after environment nodes "
        "reboot! Please check logs for details!")
def add_custom_nodegroup_after_master_upgrade(self):
    """Add new nodegroup to existing operational environment after
    Fuel Master upgrade

    Scenario:
        1. Revert "upgrade_multirack_restore" snapshot
        2. Create new nodegroup for the environment and configure
           it's networks
        3. Bootstrap slave node from custom-2 nodegroup
        4. Add node from new nodegroup to the environment with
           compute role
        5. Run network verification
        6. Deploy changes
        7. Run network verification
        8. Run OSTF
        9. Check that nodes from 'default' nodegroup can reach nodes
           from new nodegroup via management and storage networks

    Duration 50m
    Snapshot add_custom_nodegroup_after_master_upgrade
    """
    self.show_step(1)
    self.env.revert_snapshot(self.snapshot_name)
    cluster_id = self.fuel_web.get_last_created_cluster()
    self.fuel_web.assert_nodes_in_ready_state(cluster_id)
    # Guard against a leftover nodegroup from a previous run.
    asserts.assert_true(not any(ng['name'] == NODEGROUPS[2]['name']
                                for ng in
                                self.fuel_web.client.get_nodegroups()),
                        'Custom nodegroup {0} already '
                        'exists!'.format(NODEGROUPS[2]['name']))

    self.show_step(2)
    new_nodegroup = self.fuel_web.client.create_nodegroup(
        cluster_id, NODEGROUPS[2]['name'])
    logger.debug('Updating custom nodegroup ID in network configuration..')
    network_config_new = self.fuel_web.client.get_networks(cluster_id)
    with open(self.netgroup_description_file, "r") as file_obj:
        # safe_load: the file only contains plain data, and plain
        # yaml.load() without an explicit Loader is deprecated and can
        # construct arbitrary Python objects.
        netconf_all_groups = yaml.safe_load(file_obj)
    asserts.assert_true(netconf_all_groups is not None,
                        'Network configuration for nodegroups is empty!')

    # Re-point saved network templates whose group_id no longer exists
    # (the old custom group id) at the freshly created nodegroup, and
    # pick up the matching deployed network ids.
    for network in netconf_all_groups['networks']:
        if network['group_id'] is not None and \
                not any(network['group_id'] == ng['id']
                        for ng in self.fuel_web.client.get_nodegroups()):
            network['group_id'] = new_nodegroup['id']
            for new_network in network_config_new['networks']:
                if new_network['name'] == network['name'] and \
                        new_network['group_id'] == network['group_id']:
                    network['id'] = new_network['id']

    self.fuel_web.client.update_network(
        cluster_id,
        netconf_all_groups['networking_parameters'],
        netconf_all_groups['networks'])

    self.show_step(3)
    self.env.bootstrap_nodes([self.env.d_env.nodes().slaves[6]])

    self.show_step(4)
    self.fuel_web.update_nodes(
        cluster_id,
        {'slave-07': [['compute'], new_nodegroup['name']]},
        True, False
    )

    self.show_step(5)
    self.fuel_web.verify_network(cluster_id)

    self.show_step(6)
    self.fuel_web.deploy_cluster_wait(cluster_id)

    self.show_step(7)
    self.fuel_web.verify_network(cluster_id)

    self.show_step(8)
    self.fuel_web.run_ostf(cluster_id=cluster_id)

    self.show_step(9)
    primary_ctrl = self.fuel_web.get_nailgun_node_by_devops_node(
        self.fuel_web.get_nailgun_primary_node(
            slave=self.env.d_env.nodes().slaves[0]))
    with self.fuel_web.get_ssh_for_node('slave-07') as remote:
        new_node_networks = utils.get_net_settings(remote)

    # Ping every management/storage address of the new node from the
    # primary controller to prove cross-nodegroup reachability.
    for interface in ('br-storage', 'br-mgmt'):
        if interface in new_node_networks:
            logger.info("Checking new node is accessible from primary "
                        "controller via {0} interface.".format(interface))
            for ip in new_node_networks[interface]['ip_addresses']:
                address = ip.split('/')[0]
                result = check_ping(primary_ctrl['ip'],
                                    address,
                                    timeout=3)
                asserts.assert_true(
                    result,
                    "New node isn't accessible from "
                    "primary controller via {0} interface"
                    ": {1}.".format(interface, result))

    self.env.make_snapshot("add_custom_nodegroup_after_master_upgrade")
def add_custom_nodegroup(self):
    """Add new nodegroup to operational environment

    Scenario:
        1. Revert snapshot with operational cluster
        2. Create new nodegroup for the environment and configure
           it's networks
        3. Bootstrap slave node from custom-2 nodegroup
        4. Add node from new nodegroup to the environment with
           compute role
        5. Run network verification
        6. Deploy changes
        7. Run network verification
        8. Run OSTF
        9. Check that nodes from 'default' nodegroup can reach nodes
           from new nodegroup via management and storage networks

    Duration 50m
    Snapshot add_custom_nodegroup
    """
    self.show_step(1, initialize=True)
    self.env.revert_snapshot('deploy_neutron_tun_ha_nodegroups')
    cluster_id = self.fuel_web.get_last_created_cluster()
    self.fuel_web.assert_nodes_in_ready_state(cluster_id)

    # The custom nodegroup must not be present before we create it.
    taken_names = [ng['name']
                   for ng in self.fuel_web.client.get_nodegroups()]
    asserts.assert_true(
        NODEGROUPS[2]['name'] not in taken_names,
        'Custom nodegroup {0} already '
        'exists!'.format(NODEGROUPS[2]['name']))

    self.show_step(2)
    new_nodegroup = self.fuel_web.client.create_nodegroup(
        cluster_id, NODEGROUPS[2]['name'])
    logger.debug('Updating custom nodegroup ID in network configuration..')
    network_config_new = self.fuel_web.client.get_networks(cluster_id)
    asserts.assert_true(
        self.netconf_all_groups is not None,
        'Network configuration for nodegroups is empty!')

    for template_net in self.netconf_all_groups['networks']:
        # Only templates bound to a nodegroup id that no longer exists
        # need to be re-pointed at the freshly created group.
        if template_net['group_id'] is None:
            continue
        if any(template_net['group_id'] == ng['id']
               for ng in self.fuel_web.client.get_nodegroups()):
            continue
        template_net['group_id'] = new_nodegroup['id']
        # Pick up the deployed network id matching this template.
        for deployed_net in network_config_new['networks']:
            name_matches = deployed_net['name'] == template_net['name']
            group_matches = (deployed_net['group_id'] ==
                             template_net['group_id'])
            if name_matches and group_matches:
                template_net['id'] = deployed_net['id']

    self.fuel_web.client.update_network(
        cluster_id,
        self.netconf_all_groups['networking_parameters'],
        self.netconf_all_groups['networks'])

    self.show_step(3)
    self.env.bootstrap_nodes([self.env.d_env.nodes().slaves[6]])

    self.show_step(4)
    self.fuel_web.update_nodes(
        cluster_id,
        {'slave-07': [['compute'], new_nodegroup['name']]},
        True, False)

    self.show_step(5)
    self.fuel_web.verify_network(cluster_id)

    self.show_step(6)
    self.fuel_web.deploy_cluster_wait(cluster_id)

    self.show_step(7)
    self.fuel_web.verify_network(cluster_id)

    self.show_step(8)
    self.fuel_web.run_ostf(cluster_id=cluster_id)

    self.show_step(9)
    primary_ctrl = self.fuel_web.get_nailgun_node_by_devops_node(
        self.fuel_web.get_nailgun_primary_node(
            slave=self.env.d_env.nodes().slaves[0]))
    with self.fuel_web.get_ssh_for_node('slave-07') as ssh:
        new_node_networks = utils.get_net_settings(ssh)

    # Ping every management/storage address of the new node from the
    # primary controller to prove cross-nodegroup reachability.
    for bridge in ('br-storage', 'br-mgmt'):
        if bridge not in new_node_networks:
            continue
        logger.info('Checking new node is accessible from primary '
                    'controller via {0} interface.'.format(bridge))
        for cidr in new_node_networks[bridge]['ip_addresses']:
            addr = cidr.split('/')[0]
            reachable = check_ping(primary_ctrl['ip'], addr, timeout=3)
            asserts.assert_true(
                reachable,
                "New node isn't accessible from primary controller "
                'via {0} interface: {1}.'.format(bridge, reachable))

    self.env.make_snapshot('add_custom_nodegroup')