def test_create_neither_present(self):
    """RemoteFirewallControl.create raises RuntimeError when neither
    firewall-cmd nor iptables is found on the remote host (both ``which``
    probes return rc 1)."""
    probe_results = {
        "which %s" % RemoteFirewallControlFirewallCmd.firewall_app_name: Shell.RunResult(1, "", "", False),
        "which %s" % RemoteFirewallControlIpTables.firewall_app_name: Shell.RunResult(1, "", "", False),
    }

    # Answer each probed command with its canned result.
    self.mock_ssh.side_effect = lambda address, cmd: probe_results[cmd]

    with self.assertRaises(RuntimeError):
        RemoteFirewallControl.create("0.0.0.0", RRO(None)._ssh_address)
def _process_ip_rules(self, server, port_proto_filter=None):
    """
    Retrieve matching rules or entire set from given server

    :param server: target server we wish to retrieve rules from
    :param port_proto_filter: optional list of port/proto pairs to look for
    :return: RemoteFirewallControl.rules list of matching active firewall rules
    """
    # Read the remote firewall's currently-active rule set.
    firewall = RemoteFirewallControl.create(
        server['address'], self.remote_operations._ssh_address_no_check)
    firewall.process_rules()

    if not port_proto_filter:
        return firewall.rules

    # NOTE(review): the original comment referred to matching proto 'all'
    # (iptables) but the code matches proto 'any' — confirm which is intended.
    wanted_ports = [pair[0] for pair in port_proto_filter]

    # A rule matches if its (port, proto) pair is requested, or if it applies
    # to 'any' protocol on a requested port.
    return [rule for rule in firewall.rules
            if (int(rule.port), rule.protocol) in port_proto_filter
            or (rule.protocol == 'any' and int(rule.port) in wanted_ports)]
def clear_ha(self, server_list):
    """
    Stops and deletes all chroma targets for any corosync clusters
    configured on any of the lustre servers appearing in the cluster config
    """
    # Pacemaker teardown commands, executed in this exact order; each
    # command's stdout is logged at debug level.
    teardown_cmds = [
        "if crm_mon -b1; then crm_attribute --type crm_config --name maintenance-mode --update true; fi",
        "if crm_mon -b1; then crm_resource -l | xargs -n 1 crm_resource --set-parameter target-role --meta --parameter-value Stopped --resource; fi",
        "if crm_mon -b1; then crm_attribute --type crm_config --name maintenance-mode --delete true; fi",
        "if crm_mon -b1; then pcs cluster stop --all; fi",
        "pcs cluster destroy",
    ]

    for server in server_list:
        address = server["address"]

        # Workers carry no HA targets — nothing to clear.
        if self.is_worker(server):
            logger.info("{} is configured as a worker -- skipping.".format(address))
            continue

        # Without pacemaker there is no cluster configuration to remove.
        if not self.has_pacemaker(server):
            logger.info("{} does not appear to have pacemaker - skipping any removal of targets.".format(address))
            continue

        firewall = RemoteFirewallControl.create(address, self._ssh_address_no_check)

        for cmd in teardown_cmds:
            result = self._ssh_address(address, cmd)
            logger.debug("CMD OUTPUT:\n%s" % result.stdout)

        ring1_iface = server["corosync_config"]["ring1_iface"]

        # Disable cluster services and tear down the corosync ring1 network
        # interface plus all cluster state files.
        self._ssh_address(address, "systemctl disable --now pcsd pacemaker corosync")
        self._ssh_address(address, "ifconfig %s 0.0.0.0 down" % ring1_iface)
        self._ssh_address(
            address,
            "rm -f /etc/sysconfig/network-scripts/ifcfg-%s /etc/corosync/corosync.conf /var/lib/pacemaker/cib/* /var/lib/corosync/*"
            % ring1_iface,
        )

        # Re-open the ports needed afterwards: ssh (22) and lustre (988).
        self._ssh_address(address, firewall.remote_add_port_cmd(22, "tcp"))
        self._ssh_address(address, firewall.remote_add_port_cmd(988, "tcp"))
def omping(self, server, servers, count=5, timeout=30):
    """Run omping from *server* against every node in *servers* and return
    the command's combined output (stderr is redirected into stdout)."""
    address = server["address"]
    firewall = RemoteFirewallControl.create(address, self._ssh_address_no_check)

    # omping needs UDP/4321 open only for the duration of the run.
    self._ssh_address(address, firewall.remote_add_port_cmd(4321, "udp"))

    targets = " ".join([s["nodename"] for s in servers])
    result = self._ssh_address(
        address,
        "exec 2>&1; omping -T %s -c %s %s" % (timeout, count, targets),
    )

    self._ssh_address(address, firewall.remote_remove_port_cmd(4321, "udp"))
    return result.stdout
def omping(self, server, servers, count=5, timeout=30):
    """Open UDP/4321 on *server*, omping all *servers* nodenames, close the
    port again, and hand back the run's stdout."""
    firewall = RemoteFirewallControl.create(server['address'], self._ssh_address_no_check)
    self._ssh_address(server['address'], firewall.remote_add_port_cmd(4321, 'udp'))

    node_names = [peer['nodename'] for peer in servers]
    omping_cmd = 'exec 2>&1; omping -T %s -c %s %s' % (timeout, count, " ".join(node_names))
    result = self._ssh_address(server['address'], omping_cmd)

    self._ssh_address(server['address'], firewall.remote_remove_port_cmd(4321, 'udp'))
    return result.stdout
def clear_ha(self, server_list):
    """
    Stops and deletes all chroma targets for any corosync clusters
    configured on any of the lustre servers appearing in the cluster config
    """
    script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "clear_ha_el7.sh")

    for server in server_list:
        address = server["address"]

        # Workers carry no HA targets — nothing to clear.
        if self.is_worker(server):
            logger.info("{} is configured as a worker -- skipping.".format(address))
            continue

        # Without pacemaker there is no cluster configuration to remove.
        if not self.has_pacemaker(server):
            logger.info("{} does not appear to have pacemaker - skipping any removal of targets.".format(address))
            continue

        firewall = RemoteFirewallControl.create(address, self._ssh_address_no_check)

        # Ship the teardown script over ssh, prefixed with the ring1
        # interface variable it expects.
        with open(script_path, "r") as script:
            result = self._ssh_address(
                address,
                "ring1_iface=%s\n%s" % (server["corosync_config"]["ring1_iface"], script.read()))

        logger.info(
            "clear_ha script on %s results... exit code %s. stdout:\n%s\nstderr:\n%s"
            % (server["nodename"], result.rc, result.stdout, result.stderr))

        if result.rc != 0:
            logger.info(
                "clear_ha script on %s failed with exit code %s. stdout:\n%s\nstderr:\n%s"
                % (server["nodename"], result.rc, result.stdout, result.stderr))
            raise RuntimeError(
                "Failed clear_ha script on '%s'!\nrc: %s\nstdout: %s\nstderr: %s"
                % (server, result.rc, result.stdout, result.stderr))

        # Re-open the ports needed afterwards: ssh (22) and lustre (988).
        self._ssh_address(address, firewall.remote_add_port_cmd(22, "tcp"))
        self._ssh_address(address, firewall.remote_add_port_cmd(988, "tcp"))
def test_create(self):
    """RemoteFirewallControl.create returns the iptables controller when
    firewall-cmd is absent (``which`` rc 1) and iptables is present (rc 0)."""
    # Map each probed command to its simulated remote result.
    values = {
        "which %s" % RemoteFirewallControlFirewallCmd.firewall_app_name: Shell.RunResult(1, "", "", False),
        "which %s" % RemoteFirewallControlIpTables.firewall_app_name: Shell.RunResult(0, "", "", False),
    }

    def side_effect(address, cmd):
        return values[cmd]

    self.mock_ssh.side_effect = side_effect

    new_controller = RemoteFirewallControl.create("0.0.0.0", RRO(None)._ssh_address)

    # Fix: assertEquals is a deprecated alias (removed in Python 3.12) and
    # comparing type() objects is unidiomatic; assertIsInstance is the
    # standard type check.
    self.assertIsInstance(new_controller, RemoteFirewallControlIpTables)
def test_process_rules_format_2(self):
    """An active, configured firewall (example_func_4 output) is parsed into
    the expected rules list."""
    self.test_firewall = RemoteFirewallControlFirewallCmd("10.0.0.1", self.example_func_4)
    self.assertEqual(len(self.test_firewall.rules), 0)

    response = self.test_firewall.process_rules()
    self.assertEqual(response, None)

    # Parsing should yield four rules; the third must equal this named tuple.
    expected = RemoteFirewallControl.firewall_rule("80", "tcp")
    self.assertEqual(len(self.test_firewall.rules), 4)

    parsed = self.test_firewall.rules[2]
    self.assertEqual(parsed, expected)
    self.assertEqual(parsed.port, expected.port)
    self.assertEqual(parsed.protocol, expected.protocol)
def test_agent(self):
    """
    Test that when hosts are added and a filesytem is created, that all
    required firewall accesses are installed
    """
    servers = self.TEST_SERVERS[0:4]

    host_addresses = [s['address'] for s in servers]
    self.hosts = self.add_hosts(host_addresses)
    self.configure_power_control(host_addresses)

    volumes = self.wait_for_shared_volumes(4, 4)

    # Pair each volume with a primary and failover host (mgt/mdt and the
    # two osts cross over between host pairs).
    mgt_volume = volumes[0]
    mdt_volume = volumes[1]
    ost1_volume = volumes[2]
    ost2_volume = volumes[3]
    self.set_volume_mounts(mgt_volume, self.hosts[0]['id'], self.hosts[1]['id'])
    self.set_volume_mounts(mdt_volume, self.hosts[1]['id'], self.hosts[0]['id'])
    self.set_volume_mounts(ost1_volume, self.hosts[2]['id'], self.hosts[3]['id'])
    self.set_volume_mounts(ost2_volume, self.hosts[3]['id'], self.hosts[2]['id'])

    self.filesystem_id = self.create_filesystem(
        self.hosts, {
            'name': 'testfs',
            'mgt': {
                'volume_id': mgt_volume['id']
            },
            'mdts': [{
                'volume_id': mdt_volume['id'],
                'conf_params': {}
            }],
            'osts': [{
                'volume_id': ost1_volume['id'],
                'conf_params': {}
            }, {
                'volume_id': ost2_volume['id'],
                'conf_params': {}
            }],
            'conf_params': {}
        })

    mcast_ports = {}
    for server in servers:
        # SELinux must not be enforcing, or firewall behavior is not what
        # this test measures.
        self.assertNotEqual(
            'Enforcing\n',
            self.remote_operations._ssh_address(server['address'],
                                                'getenforce').stdout)

        mcast_port = self.remote_operations.get_corosync_port(
            server['fqdn'])
        self.assertIsNotNone(mcast_port)
        mcast_ports[server['address']] = mcast_port

        # Expect exactly two matching rules: corosync mcast (udp) and
        # lustre (988/tcp).
        matching_rules = self._process_ip_rules(server, [(mcast_port, 'udp'),
                                                         (988, 'tcp')])
        self.assertEqual(len(matching_rules), 2)

    # tear it down and make sure firewall rules are cleaned up
    self.graceful_teardown(self.chroma_manager)

    for server in servers:
        mcast_port = mcast_ports[server['address']]

        # After teardown, the corosync mcast rule must be gone.
        matching_rules = self._process_ip_rules(server,
                                                [(mcast_port, 'udp')])
        self.assertEqual(len(matching_rules), 0)

        # retrieve command string compatible with this server target
        firewall = RemoteFirewallControl.create(
            server['address'], self.remote_operations._ssh_address_no_check)

        # test that the remote firewall configuration doesn't include rules
        # to enable the mcast_port (grep is expected to find nothing)
        self.remote_operations._ssh_address(
            server['address'],
            firewall.remote_validate_persistent_rule_cmd(mcast_port),
            expected_return_code=self.GREP_NOTFOUND_RC)
def clear_ha(self, server_list):
    """
    Stops and deletes all chroma targets for any corosync clusters
    configured on any of the lustre servers appearing in the cluster config
    """
    for server in server_list:
        address = server['address']

        # Workers carry no HA targets — nothing to clear.
        if self.is_worker(server):
            logger.info("%s is configured as a worker -- skipping." %
                        server['address'])
            continue

        if self.has_pacemaker(server):
            firewall = RemoteFirewallControl.create(
                address, self._ssh_address_no_check)

            if config.get('pacemaker_hard_reset', False):
                # Hard reset: run the distro-specific clear_ha shell script
                # remotely (el6/el7 chosen by the digit in server['distro']).
                clear_ha_script_file = os.path.join(
                    os.path.dirname(os.path.abspath(__file__)),
                    "clear_ha_el%s.sh" % re.search('\d', server['distro']).group(0))

                with open(clear_ha_script_file, 'r') as clear_ha_script:
                    # The script expects ring1_iface pre-set as a variable.
                    result = self._ssh_address(
                        address, "ring1_iface=%s\n%s" %
                        (server['corosync_config']['ring1_iface'],
                         clear_ha_script.read()))
                    logger.info(
                        "clear_ha script on %s results... exit code %s. stdout:\n%s\nstderr:\n%s"
                        % (server['nodename'], result.rc, result.stdout,
                           result.stderr))

                    if result.rc != 0:
                        logger.info(
                            "clear_ha script on %s failed with exit code %s. stdout:\n%s\nstderr:\n%s"
                            % (server['nodename'], result.rc, result.stdout,
                               result.stderr))
                        raise RuntimeError(
                            "Failed clear_ha script on '%s'!\nrc: %s\nstdout: %s\nstderr: %s"
                            % (server, result.rc, result.stdout,
                               result.stderr))

                # Re-open ssh (22) and lustre (988) ports after the reset.
                self._ssh_address(address,
                                  firewall.remote_add_port_cmd(22, 'tcp'))
                self._ssh_address(address,
                                  firewall.remote_add_port_cmd(988, 'tcp'))

                self.unmount_lustre_targets(server)
            else:
                crm_targets = self.get_pacemaker_targets(server)

                # Stop targets and delete targets
                for target in crm_targets:
                    self._ssh_address(
                        address,
                        'crm_resource --resource %s --set-parameter target-role --meta --parameter-value Stopped'
                        % target)
                for target in crm_targets:
                    # Wait for each target to stop before deleting it.
                    self._test_case.wait_until_true(
                        lambda: not self.is_pacemaker_target_running(
                            server, target))
                    self._ssh_address(address,
                                      'pcs resource delete %s' % target)
                    self._ssh_address(address,
                                      'crm_resource -C -r %s' % target)

                # Verify no more targets
                self._test_case.wait_until_true(
                    lambda: not self.get_pacemaker_targets(server))

                # remove firewall rules previously added for corosync
                mcast_port = self.get_corosync_port(server['fqdn'])
                if mcast_port:
                    self._ssh_address(
                        address,
                        firewall.remote_remove_port_cmd(mcast_port, 'udp'))

            # If the chroma-agent package is installed, stop it and clear
            # its state directory.
            rpm_q_result = self._ssh_address(address,
                                             "rpm -q chroma-agent",
                                             expected_return_code=None)
            if rpm_q_result.rc == 0:
                # Stop the agent
                self._ssh_address(address, 'systemctl stop chroma-agent')
                self._ssh_address(
                    address,
                    '''
                    rm -rf /var/lib/chroma/*;
                    ''',
                    expected_return_code=
                    None  # Keep going if it failed - may be none there.
                )
        else:
            logger.info(
                "%s does not appear to have pacemaker - skipping any removal of targets."
                % address)