def clean_services_for_node(controller, node):
    """Remove every nova service registered for *node*.

    Lists the services bound to the node's hostname with
    ``nova service-list`` and deletes each one by its Id.
    """
    raw_services = nova.run_nova_cmd(
        ["nova", "service-list", "--host", node.data['hostname']],
        controller)
    for record in nova.nova_stdout_parser(raw_services):
        nova.run_nova_cmd(
            ["nova", "service-delete", record['Id']], controller,
            output=False)
def test_nova_runner_call(mocker, cmd, call_output):
    """Check run_nova_cmd wraps the command in a shell with openrc sourced
    and dispatches to ssh.call_output or ssh.call depending on call_output."""
    env = mock.Mock()
    env.get_attributes.return_value = {"editable": {}}
    node = mock.Mock(data={"id": 1, "ip": "1.2.3.4"}, env=env)
    target = ("octane.util.ssh.call_output" if call_output
              else "octane.util.ssh.call")
    ssh_call_mock = mocker.patch(target)
    nova.run_nova_cmd(cmd, node, call_output)
    expected_cmd = ['sh', '-c', '. /root/openrc; ' + ' '.join(cmd)]
    ssh_call_mock.assert_called_once_with(expected_cmd, node=node)
def shutoff_vms(self):
    """Stop every ACTIVE instance on the node being upgraded.

    Refuses to proceed (raises Exception) if any instance on the node is
    in ERROR state, because such instances cannot be handled
    automatically. After issuing ``nova stop`` for each active instance,
    blocks until no instance on the node remains in ACTIVE state.
    """
    controller = env_util.get_one_controller(self.env)
    node_fqdn = node_util.get_nova_node_handle(self.node)
    if nova.do_nova_instances_exist(controller, node_fqdn, "ERROR"):
        # Fixed: the concatenated message lacked a space after the comma
        # ("...{hostname},please fix...").
        raise Exception(
            "There are instances in ERROR state on {hostname}, "
            "please fix this problem and start upgrade_node "
            "command again".format(hostname=node_fqdn))
    for instance_id in nova.get_active_instances(controller, node_fqdn):
        nova.run_nova_cmd(
            ["nova", "stop", instance_id], controller, output=False)
    # Wait until all the stop requests have taken effect.
    nova.waiting_for_status_completed(controller, node_fqdn, "ACTIVE")
def postdeploy(self):
    """Re-enable the nova-compute service on the upgraded node.

    First tries to enable the service using the node's full FQDN; if
    nova rejects that (CalledProcessError), retries with the short
    hostname, since the service may be registered under either form.
    """
    self.restore_iscsi_initiator_info()
    controller = env_util.get_one_controller(self.env)
    # FIXME: Add more correct handling of case
    # when node may have not full name in services data
    try:
        call_host = self.node.data['fqdn']
        nova.run_nova_cmd(
            ["nova", "service-enable", call_host, "nova-compute"],
            controller, output=False)
    except subprocess.CalledProcessError as exc:
        LOG.warn("Cannot start service 'nova-compute' on {0} "
                 "by reason: {1}. Try again".format(
                     self.node.data['fqdn'], exc))
        # Retry with the short hostname (domain part stripped).
        call_host = self.node.data['fqdn'].split('.', 1)[0]
        nova.run_nova_cmd(
            ["nova", "service-enable", call_host, "nova-compute"],
            controller, output=False)
def evacuate_host(self):
    """Live-migrate all active instances off the node being upgraded.

    Safety checks before evacuating:
      * refuses to disable the last enabled nova-compute in the cluster
        (that would make host evacuation impossible);
      * refuses to proceed if any instance on the node is in ERROR state.

    After migrating, verifies that no instances remain on the node and
    raises Exception otherwise.
    """
    controller = env_util.get_one_controller(self.env)
    enabled_computes, disabled_computes = nova.get_compute_lists(
        controller)
    node_fqdn = node_util.get_nova_node_handle(self.node)
    if [node_fqdn] == enabled_computes:
        # Fixed typo in the user-facing message: "unpgrade-node".
        raise Exception("You try to disable last enabled nova-compute "
                        "service on {hostname} in cluster. "
                        "This leads to disable host evacuation. "
                        "Fix this problem and run upgrade-node "
                        "command again".format(hostname=node_fqdn))
    if nova.do_nova_instances_exist(controller, node_fqdn, "ERROR"):
        # Fixed: missing space after the comma in the concatenated message.
        raise Exception(
            "There are instances in ERROR state on {hostname}, "
            "please fix this problem and start upgrade_node "
            "command again".format(hostname=node_fqdn))
    if node_fqdn in disabled_computes:
        LOG.warn("Node {0} already disabled".format(node_fqdn))
    else:
        nova.run_nova_cmd(
            ["nova", "service-disable", node_fqdn, "nova-compute"],
            controller, output=False)
    for instance_id in nova.get_active_instances(controller, node_fqdn):
        nova.run_nova_cmd(
            ["nova", "live-migration", instance_id], controller,
            output=False)
        # Wait for each migration to finish before starting the next one.
        nova.waiting_for_status_completed(
            controller, node_fqdn, "MIGRATING")
    if nova.do_nova_instances_exist(controller, node_fqdn):
        raise Exception(
            "There are instances on {hostname} after host-evacuation, "
            "please fix this problem and start upgrade_node "
            "command again".format(hostname=node_fqdn))