def execute(self, nodes_to_provision): """Run provisioning task on specified nodes """ # locking nodes nodes_ids = [node.id for node in nodes_to_provision] nodes = objects.NodeCollection.filter_by_list(None, "id", nodes_ids, order_by="id") objects.NodeCollection.lock_for_update(nodes).all() objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision) logger.debug("Nodes to provision: {0}".format(" ".join([n.fqdn for n in nodes_to_provision]))) task_provision = Task(name="provision") task_provision.node_ids = nodes_ids # node_ids参数在安装成功时候无用,但在安装失败的时候需要用到 db().add(task_provision) db().commit() provision_message = self._call_silently( task_provision, tasks.InstallosTask, nodes_to_provision, method_name="message" ) task_provision = objects.Task.get_by_uid(task_provision.id, fail_if_not_found=True, lock_for_update=True) task_provision.cache = provision_message objects.NodeCollection.lock_for_update(nodes).all() for node in nodes_to_provision: node.pending_addition = False node.status = NODE_STATUSES.provisioning node.progress = 0 db().commit() rpc.cast("naily", provision_message) logger.info(u"消息发送完毕") return task_provision
def execute(self, nodes_to_deployment):
    """Create a deployment task for the given nodes and dispatch it via RPC."""
    TaskHelper.update_slave_nodes_fqdn(nodes_to_deployment)
    fqdns = ' '.join(node.fqdn for node in nodes_to_deployment)
    logger.debug('Nodes to deploy: {0}'.format(fqdns))

    deploy_task = Task(name='deployment', cluster=self.cluster)
    db().add(deploy_task)
    db().commit()

    message = self._call_silently(
        deploy_task,
        tasks.DeploymentTask,
        nodes_to_deployment,
        method_name='message')
    db().refresh(deploy_task)
    deploy_task.cache = message

    # Mark every node as entering deployment.
    for node in nodes_to_deployment:
        node.status = 'deploying'
        node.progress = 0
    db().commit()

    rpc.cast('naily', message)
    return deploy_task
def test_verify_networks_resp_empty_nodes_custom_error(self):
    """An empty node list plus a custom error message errors the task."""
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[{"api": False}, {"api": False}])
    cluster = self.env.clusters[0]
    node1, node2 = self.env.nodes
    networks = [{'iface': 'eth0', 'vlans': range(100, 105)}]

    task = Task(name="super", cluster_id=cluster.id)
    task.cache = {
        "args": {
            'nodes': self.nodes_message((node1, node2), networks),
            'offline': 0,
        }
    }
    self.db.add(task)
    self.db.commit()

    error_msg = 'Custom error message.'
    self.receiver.verify_networks_resp(
        task_uuid=task.uuid, status='ready', nodes=[], error=error_msg)
    self.db.flush()
    self.db.refresh(task)

    self.assertEqual(task.status, "error")
    self.assertEqual(task.message, error_msg)
def test_verify_networks_resp_extra_nodes_error(self):
    """An extra node in the response must not break verification.

    node3 is not part of the task cache; its presence in the response
    is ignored and the task still finishes 'ready'.
    """
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[
            {"api": False},
            {"api": False}
        ]
    )
    cluster_db = self.env.clusters[0]
    node1, node2 = self.env.nodes
    node3 = self.env.create_node(api=False)
    nets_sent = [{'iface': 'eth0', 'vlans': range(100, 105)}]

    task = Task(
        name="super",
        cluster_id=cluster_db.id
    )
    task.cache = {
        "args": {
            'nodes': [{'uid': node1.id, 'networks': nets_sent},
                      {'uid': node2.id, 'networks': nets_sent}]
        }
    }
    self.db.add(task)
    self.db.commit()

    kwargs = {'task_uuid': task.uuid,
              'status': 'ready',
              'nodes': [{'uid': node3.id, 'networks': nets_sent},
                        {'uid': node2.id, 'networks': nets_sent},
                        {'uid': node1.id, 'networks': nets_sent}]}
    self.receiver.verify_networks_resp(**kwargs)
    self.db.refresh(task)
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(task.status, "ready")
    self.assertEqual(task.message, '')
def test_verify_networks_resp(self):
    """A complete, matching verification response leaves the task 'ready'."""
    self.env.create(cluster_kwargs={},
                    nodes_kwargs=[{"api": False}, {"api": False}])
    cluster = self.env.clusters[0]
    node1, node2 = self.env.nodes
    vlan_nets = [{'iface': 'eth0', 'vlans': range(100, 105)}]

    task = Task(name="verify_networks", cluster_id=cluster.id)
    task.cache = {
        "args": {
            "nodes": [{'uid': n.id, 'networks': vlan_nets}
                      for n in (node1, node2)]
        }
    }
    self.db.add(task)
    self.db.commit()

    self.receiver.verify_networks_resp(
        task_uuid=task.uuid,
        status='ready',
        nodes=[{'uid': n.id, 'networks': vlan_nets}
               for n in (node1, node2)])
    self.db.refresh(task)
    self.assertEqual(task.status, "ready")
    self.assertEqual(task.message, '')
def test_verify_networks_resp_empty_nodes_custom_error(self):
    """Empty response nodes with a custom error put the task in 'error'."""
    self.env.create(cluster_kwargs={},
                    nodes_kwargs=[{"api": False}, {"api": False}])
    cluster = self.env.clusters[0]
    node1, node2 = self.env.nodes
    sent = [{'iface': 'eth0', 'vlans': range(100, 105)}]

    task = Task(name="super", cluster_id=cluster.id)
    task.cache = {"args": {'nodes': [
        {'uid': node1.id, 'networks': sent},
        {'uid': node2.id, 'networks': sent}]}}
    self.db.add(task)
    self.db.commit()

    error_msg = 'Custom error message.'
    self.receiver.verify_networks_resp(
        task_uuid=task.uuid, status='ready', nodes=[], error=error_msg)
    self.db.refresh(task)

    self.assertEqual(task.status, "error")
    self.assertEqual(task.message, error_msg)
def test_verify_networks_resp_incomplete_network_data_on_first_node(self):
    """First node network data incompletion causes task fail"""
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[{"api": False, "name": "node1"},
                      {"api": False, "name": "node2"}]
    )
    cluster_db = self.env.clusters[0]
    node1, node2 = self.env.nodes
    nets_sent = [{"iface": "eth0", "vlans": range(100, 105)}]
    task = Task(name="super", cluster_id=cluster_db.id)
    task.cache = {"args": {"nodes": self.nodes_message((node1, node2), nets_sent),
                           "offline": 0}}
    self.db.add(task)
    self.db.commit()
    # Response starts with empty networks for both nodes, then node2's
    # data is restored, so only node1 is missing all its vlans.
    kwargs = {"task_uuid": task.uuid, "status": "ready",
              "nodes": self.nodes_message((node1, node2), [])}
    kwargs["nodes"][1]["networks"] = nets_sent
    self.receiver.verify_networks_resp(**kwargs)
    self.db.flush()
    self.db.refresh(task)
    self.assertEqual(task.status, "error")
    self.assertEqual(task.message, "")
    # Only node1 is expected to be reported with the absent vlans.
    error_nodes = [
        {
            "uid": node1.id,
            "interface": "eth0",
            "name": node1.name,
            "mac": node1.interfaces[0].mac,
            "absent_vlans": sorted(nets_sent[0]["vlans"]),
        }
    ]
    # Normalize vlan ordering before comparing; the receiver does not
    # guarantee sorted output.
    task.result[0]["absent_vlans"] = sorted(task.result[0]["absent_vlans"])
    self.assertEqual(task.result, error_nodes)
def test_verify_networks_resp_partially_without_vlans(self):
    """Verification passes when only some interfaces carry vlans."""
    self.env.create(cluster_kwargs={},
                    nodes_kwargs=[{"api": False}, {"api": False}])
    cluster = self.env.clusters[0]
    node1, node2 = self.env.nodes
    networks = [{'iface': 'eth0', 'vlans': [0]},
                {'iface': 'eth1', 'vlans': range(100, 104)}]

    task = Task(name="super", cluster_id=cluster.id)
    task.cache = {"args": {'nodes': [
        {'uid': node1.id, 'networks': networks},
        {'uid': node2.id, 'networks': networks}]}}
    self.db.add(task)
    self.db.commit()

    self.receiver.verify_networks_resp(
        task_uuid=task.uuid,
        status='ready',
        nodes=[{'uid': node1.id, 'networks': networks},
               {'uid': node2.id, 'networks': networks}])
    self.db.refresh(task)
    self.assertEqual(task.status, "ready")
def test_verify_networks_resp_extra_nodes_error(self):
    """An unexpected third node in the response does not fail the task."""
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[{"api": False}, {"api": False}])
    cluster = self.env.clusters[0]
    node1, node2 = self.env.nodes
    extra_node = self.env.create_node(api=False)
    networks = [{'iface': 'eth0', 'vlans': range(100, 105)}]

    task = Task(name="super", cluster_id=cluster.id)
    task.cache = {
        "args": {
            'nodes': self.nodes_message((node1, node2), networks),
            'offline': 0,
        }
    }
    self.db.add(task)
    self.db.commit()

    self.receiver.verify_networks_resp(
        task_uuid=task.uuid,
        status='ready',
        nodes=self.nodes_message((node1, node2, extra_node), networks))
    self.db.flush()
    self.db.refresh(task)

    self.assertEqual(task.status, "ready")
    self.assertEqual(task.message, '')
def test_verify_networks_resp(self):
    """Happy path: full matching response leaves the task 'ready'."""
    self.env.create(cluster_kwargs={},
                    nodes_kwargs=[{"api": False}, {"api": False}])
    cluster = self.env.clusters[0]
    node1, node2 = self.env.nodes
    networks = [{'iface': 'eth0', 'vlans': range(100, 105)}]

    task = Task(name="verify_networks", cluster_id=cluster.id)
    task.cache = {
        "args": {
            "nodes": self.nodes_message((node1, node2), networks),
            "offline": 0,
        }
    }
    self.db.add(task)
    self.db.commit()

    self.receiver.verify_networks_resp(
        task_uuid=task.uuid,
        status='ready',
        nodes=self.nodes_message((node1, node2), networks))
    self.db.flush()
    self.db.refresh(task)

    self.assertEqual(task.status, "ready")
    self.assertEqual(task.message, '')
def execute(self, nodes_to_deployment):
    """Create and dispatch a deployment task for the given nodes.

    Locks node rows before updating FQDNs and statuses, builds the RPC
    message, caches it on the task and casts it to 'naily'.
    """
    # locking nodes for update
    objects.NodeCollection.lock_nodes(nodes_to_deployment)
    objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deployment)
    logger.debug('Nodes to deploy: {0}'.format(' '.join(
        [n.fqdn for n in nodes_to_deployment])))
    task_deployment = Task(name='deployment', cluster=self.cluster)
    db().add(task_deployment)
    deployment_message = self._call_silently(task_deployment,
                                             tasks.DeploymentTask,
                                             nodes_to_deployment,
                                             method_name='message')
    db().refresh(task_deployment)
    # locking task: re-fetch with a row lock before mutating the cache.
    task_deployment = objects.Task.get_by_uid(task_deployment.id,
                                              fail_if_not_found=True,
                                              lock_for_update=True)
    # locking nodes: re-acquire before changing node status fields.
    objects.NodeCollection.lock_nodes(nodes_to_deployment)
    task_deployment.cache = deployment_message
    for node in nodes_to_deployment:
        node.status = 'deploying'
        node.progress = 0
    db().commit()
    rpc.cast('naily', deployment_message)
    return task_deployment
def test_verify_networks_resp_error(self):
    """A response missing vlan 104 on every node errors the task."""
    self.env.create(cluster_kwargs={},
                    nodes_kwargs=[{"api": False}, {"api": False}])
    cluster = self.env.clusters[0]
    node1, node2 = self.env.nodes
    sent = [{"iface": "eth0", "vlans": range(100, 105)}]
    received = [{"iface": "eth0", "vlans": range(100, 104)}]

    task = Task(name="super", cluster_id=cluster.id)
    task.cache = {"args": {"nodes": self.nodes_message((node1, node2), sent),
                           "offline": 0}}
    self.db.add(task)
    self.db.commit()

    self.receiver.verify_networks_resp(
        task_uuid=task.uuid,
        status="ready",
        nodes=self.nodes_message((node1, node2), received))
    self.db.flush()
    self.db.refresh(task)

    self.assertEqual(task.status, "error")
    error_nodes = [{"uid": node.id,
                    "interface": "eth0",
                    "name": node.name,
                    "absent_vlans": [104],
                    "mac": node.interfaces[0].mac}
                   for node in self.env.nodes]
    self.assertEqual(task.message, "")
    self.assertEqual(task.result, error_nodes)
def execute(self, nodes_to_deployment):
    """Build the deployment message for the nodes and cast it to 'naily'."""
    TaskHelper.update_slave_nodes_fqdn(nodes_to_deployment)
    logger.debug('Nodes to deploy: {0}'.format(
        ' '.join(n.fqdn for n in nodes_to_deployment)))

    task = Task(name='deployment', cluster=self.cluster)
    db().add(task)
    db().commit()

    message = self._call_silently(
        task, tasks.DeploymentTask, nodes_to_deployment,
        method_name='message')
    db().refresh(task)
    task.cache = message

    # Transition every node into the deploying state.
    for node in nodes_to_deployment:
        node.status = 'deploying'
        node.progress = 0
    db().commit()

    rpc.cast('naily', message)
    return task
def test_verify_networks_resp_partially_without_vlans(self):
    """Verification succeeds when some interfaces have no tagged vlans."""
    self.env.create(cluster_kwargs={},
                    nodes_kwargs=[{"api": False}, {"api": False}])
    cluster = self.env.clusters[0]
    node1, node2 = self.env.nodes
    networks = [{'iface': 'eth0', 'vlans': [0]},
                {'iface': 'eth1', 'vlans': range(100, 104)}]

    task = Task(name="super", cluster_id=cluster.id)
    task.cache = {
        "args": {
            'nodes': self.nodes_message((node1, node2), networks),
            'offline': 0,
        }
    }
    self.db.add(task)
    self.db.commit()

    self.receiver.verify_networks_resp(
        task_uuid=task.uuid,
        status='ready',
        nodes=self.nodes_message((node1, node2), networks))
    self.db.flush()
    self.db.refresh(task)
    self.assertEqual(task.status, "ready")
def test_verify_networks_error_and_notice_are_concatenated(self):
    """Custom error text and the offline-nodes notice are joined by a newline."""
    self.env.create(cluster_kwargs={},
                    nodes_kwargs=[{"api": False}, {"api": False}])
    cluster = self.env.clusters[0]
    node1, node2 = self.env.nodes
    networks = [{"iface": "eth0", "vlans": range(100, 105)}]

    task = Task(name="verify_networks", cluster_id=cluster.id)
    task.cache = {"args": {"nodes": self.nodes_message((node1, node2), networks),
                           "offline": 2}}
    self.db.add(task)
    self.db.flush()

    custom_error = "CustomError"
    self.receiver.verify_networks_resp(
        task_uuid=task.uuid,
        status="error",
        nodes=self.nodes_message((node1, node2), networks),
        error=custom_error)
    self.db.flush()
    self.db.refresh(task)

    self.assertEqual(task.status, "error")
    offline_notice = (
        "Notice: 2 node(s) were offline during connectivity"
        " check so they were skipped from the check."
    )
    self.assertEqual(task.message, "\n".join((custom_error, offline_notice)))
def test_verify_networks_resp_without_vlans_only_erred(self):
    """Net verification without vlans fails when not all info received"""
    self.env.create(cluster_kwargs={},
                    nodes_kwargs=[{"api": False}, {"api": False}])
    cluster_db = self.env.clusters[0]
    node1, node2 = self.env.nodes
    # Untagged traffic was sent (vlan 0) but nothing came back.
    nets_sent = [{"iface": "eth0", "vlans": [0]}]
    nets_resp = [{"iface": "eth0", "vlans": []}]
    task = Task(name="super", cluster_id=cluster_db.id)
    task.cache = {"args": {"nodes": self.nodes_message((node1, node2), nets_sent),
                           "offline": 0}}
    self.db.add(task)
    self.db.commit()
    kwargs = {"task_uuid": task.uuid, "status": "ready",
              "nodes": self.nodes_message((node1, node2), nets_resp)}
    self.receiver.verify_networks_resp(**kwargs)
    self.db.flush()
    self.db.refresh(task)
    self.assertEqual(task.status, "error")
    # Both nodes should be reported as missing the sent (untagged) vlan.
    error_nodes = [
        {
            "uid": node1.id,
            "interface": "eth0",
            "name": node1.name,
            "mac": node1.interfaces[0].mac,
            "absent_vlans": nets_sent[0]["vlans"],
        },
        {
            "uid": node2.id,
            "interface": "eth0",
            "name": node2.name,
            "mac": node2.interfaces[0].mac,
            "absent_vlans": nets_sent[0]["vlans"],
        },
    ]
    self.assertEqual(task.result, error_nodes)
def execute(self, nodes_to_deployment):
    """Create a deployment task for the nodes and dispatch it over RPC.

    Takes row locks on nodes and on the task itself before mutating
    them, then casts the deployment message to the 'naily' queue.
    """
    # locking nodes for update
    objects.NodeCollection.lock_nodes(nodes_to_deployment)
    objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deployment)
    logger.debug("Nodes to deploy: {0}".format(" ".join([n.fqdn for n in nodes_to_deployment])))
    task_deployment = Task(name="deployment", cluster=self.cluster)
    db().add(task_deployment)
    deployment_message = self._call_silently(
        task_deployment, tasks.DeploymentTask, nodes_to_deployment, method_name="message"
    )
    db().refresh(task_deployment)
    # locking task: re-fetch with a row lock before writing the cache.
    task_deployment = objects.Task.get_by_uid(task_deployment.id, fail_if_not_found=True, lock_for_update=True)
    # locking nodes: re-acquire before changing node status fields.
    objects.NodeCollection.lock_nodes(nodes_to_deployment)
    task_deployment.cache = deployment_message
    for node in nodes_to_deployment:
        node.status = "deploying"
        node.progress = 0
    db().commit()
    rpc.cast("naily", deployment_message)
    return task_deployment
def test_verify_networks_resp_forgotten_node_error(self):
    """A node missing from the response is reported in the task message."""
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[{"api": False, "name": "node1"},
                      {"api": False, "name": "node2"},
                      {"api": False, "name": "node3"}])
    cluster = self.env.clusters[0]
    node1, node2, node3 = self.env.nodes
    networks = [{"iface": "eth0", "vlans": range(100, 105)}]

    task = Task(name="super", cluster_id=cluster.id)
    task.cache = {
        "args": {"nodes": self.nodes_message((node1, node2, node3), networks),
                 "offline": 0}}
    self.db.add(task)
    self.db.commit()

    # node3 is absent from the response on purpose.
    self.receiver.verify_networks_resp(
        task_uuid=task.uuid,
        status="ready",
        nodes=self.nodes_message((node1, node2), networks))
    self.db.flush()
    self.db.refresh(task)

    self.assertEqual(task.status, "error")
    self.assertRegexpMatches(task.message, node3.name)
    self.assertEqual(task.result, {})
def execute(self, nodes_to_provision):
    """Run the provisioning task on the specified nodes."""
    objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
    fqdns = ' '.join(node.fqdn for node in nodes_to_provision)
    logger.debug('Nodes to provision: {0}'.format(fqdns))

    task = Task(name='provision', cluster=self.cluster)
    db().add(task)
    db().commit()

    message = self._call_silently(
        task,
        tasks.ProvisionTask,
        nodes_to_provision,
        method_name='message'
    )
    db().refresh(task)
    task.cache = message

    # Nodes leave the pending queue and enter provisioning.
    for node in nodes_to_provision:
        node.pending_addition = False
        node.status = 'provisioning'
        node.progress = 0
    db().commit()

    rpc.cast('naily', message)
    return task
def test_verify_networks_with_dhcp_subtask_erred(self):
    """An errored check_dhcp subtask propagates its message to the parent task."""
    self.env.create(cluster_kwargs={},
                    nodes_kwargs=[{"api": False}, {"api": False}])
    cluster_db = self.env.clusters[0]
    node1, node2 = self.env.nodes
    nets_sent = [{"iface": "eth0", "vlans": range(100, 105)}]
    task = Task(name="verify_networks", cluster_id=cluster_db.id)
    task.cache = {"args": {"nodes": self.nodes_message((node1, node2), nets_sent),
                           "offline": 0}}
    self.db.add(task)
    self.db.commit()
    # Child dhcp check already failed before the connectivity response.
    dhcp_subtask = Task(
        name="check_dhcp", cluster_id=cluster_db.id, parent_id=task.id,
        status="error", message="DHCP ERROR"
    )
    self.db.add(dhcp_subtask)
    self.db.commit()
    # node1 reports full data; node2 reports nothing, so node2 is the
    # one expected in the result.
    kwargs = {"task_uuid": task.uuid, "status": "ready",
              "nodes": self.nodes_message((node1, node2), [])}
    kwargs["nodes"][0]["networks"] = nets_sent
    self.receiver.verify_networks_resp(**kwargs)
    self.assertEqual(task.status, "error")
    self.assertEqual(task.message, u"DHCP ERROR")
    # Normalize vlan ordering before the comparison.
    task.result[0]["absent_vlans"] = sorted(task.result[0]["absent_vlans"])
    self.assertEqual(
        task.result,
        [
            {
                u"absent_vlans": [100, 101, 102, 103, 104],
                u"interface": "eth0",
                u"mac": node2.interfaces[0].mac,
                u"name": "Untitled ({0})".format(node2.mac[-5:].lower()),
                u"uid": node2.id,
            }
        ],
    )
def test_verify_networks_resp_error_with_removed_node(self):
    """A node deleted mid-verification is still reported, with mac 'unknown'."""
    self.env.create(cluster_kwargs={},
                    nodes_kwargs=[{"api": False}, {"api": False}])
    cluster_db = self.env.clusters[0]
    node1, node2 = self.env.nodes
    nets_sent = [{"iface": "eth0", "vlans": range(100, 105)}]
    nets_resp = [{"iface": "eth0", "vlans": range(100, 104)}]
    task = Task(name="super", cluster_id=cluster_db.id)
    task.cache = {"args": {"nodes": self.nodes_message((node1, node2), nets_sent),
                           "offline": 0}}
    self.db.add(task)
    self.db.commit()
    kwargs = {"task_uuid": task.uuid, "status": "ready",
              "nodes": self.nodes_message((node1, node2), nets_resp)}
    # Remove node2 before the response is processed.
    self.db.delete(node2)
    self.db.commit()
    self.receiver.verify_networks_resp(**kwargs)
    self.db.flush()
    # Read the task back through the REST API rather than the session.
    resp = self.app.get(reverse("TaskHandler", kwargs={"obj_id": task.id}),
                        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    task = resp.json_body
    self.assertEqual(task["status"], "error")
    # node2 no longer exists so its mac falls back to "unknown".
    error_nodes = [
        {
            "uid": node1.id,
            "interface": "eth0",
            "name": node1.name,
            "absent_vlans": [104],
            "mac": node1.interfaces[0].mac,
        },
        {"uid": node2.id, "interface": "eth0", "name": node2.name,
         "absent_vlans": [104], "mac": "unknown"},
    ]
    self.assertEqual(task.get("message"), "")
    self.assertEqual(task["result"], error_nodes)
def test_verify_networks_resp_without_vlans_only(self):
    """Verification passes when interfaces are configured without vlans."""
    self.env.create(cluster_kwargs={},
                    nodes_kwargs=[{"api": False}, {"api": False}])
    cluster = self.env.clusters[0]
    node1, node2 = self.env.nodes
    untagged = [{'iface': 'eth0', 'vlans': [0]},
                {'iface': 'eth1', 'vlans': [0]}]

    task = Task(name="super", cluster_id=cluster.id)
    task.cache = {
        "args": {'nodes': self.nodes_message((node1, node2), untagged)}}
    self.db.add(task)
    self.db.commit()

    self.receiver.verify_networks_resp(
        task_uuid=task.uuid,
        status='ready',
        nodes=self.nodes_message((node1, node2), untagged))
    self.db.flush()
    self.db.refresh(task)
    self.assertEqual(task.status, "ready")
def test_verify_networks_resp_forgotten_node_error(self):
    """Omitting one node from the response errors the task and names it."""
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[{"api": False, 'name': 'node1'},
                      {"api": False, 'name': 'node2'},
                      {"api": False, 'name': 'node3'}])
    cluster = self.env.clusters[0]
    node1, node2, node3 = self.env.nodes
    networks = [{'iface': 'eth0', 'vlans': range(100, 105)}]

    task = Task(name="super", cluster_id=cluster.id)
    task.cache = {
        "args": {
            'nodes': self.nodes_message((node1, node2, node3), networks)
        }
    }
    self.db.add(task)
    self.db.commit()

    # Response deliberately leaves node3 out.
    self.receiver.verify_networks_resp(
        task_uuid=task.uuid,
        status='ready',
        nodes=self.nodes_message((node1, node2), networks))
    self.db.flush()
    self.db.refresh(task)

    self.assertEqual(task.status, "error")
    self.assertRegexpMatches(task.message, node3.name)
    self.assertEqual(task.result, {})
def test_verify_networks_resp_incomplete_network_data_error(self):
    """A node returning no network data is reported per missing interface."""
    # One node has single interface
    meta = self.env.default_metadata()
    mac = '02:07:43:78:4F:58'
    self.env.set_interfaces_in_meta(
        meta, [{'name': 'eth0', 'mac': mac}])
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[
            {"api": False, 'name': 'node1'},
            {"api": False, 'name': 'node2', 'meta': meta},
            {"api": False, 'name': 'node3'}
        ]
    )
    cluster_db = self.env.clusters[0]
    node1, node2, node3 = self.env.nodes
    nets_sent = [{'iface': 'eth0', 'vlans': range(100, 105)},
                 {'iface': 'eth1', 'vlans': [106]},
                 {'iface': 'eth2', 'vlans': [107]}]
    task = Task(
        name="super",
        cluster_id=cluster_db.id
    )
    task.cache = {
        "args": {
            'nodes': [{'uid': node1.id, 'networks': nets_sent},
                      {'uid': node2.id, 'networks': nets_sent},
                      {'uid': node3.id, 'networks': nets_sent}]
        }
    }
    self.db.add(task)
    self.db.commit()
    # node2 answers with no networks at all.
    kwargs = {'task_uuid': task.uuid,
              'status': 'ready',
              'nodes': [{'uid': node1.id, 'networks': nets_sent},
                        {'uid': node2.id, 'networks': []},
                        {'uid': node3.id, 'networks': nets_sent}]}
    self.receiver.verify_networks_resp(**kwargs)
    self.db.flush()
    self.db.refresh(task)
    self.assertEqual(task.status, "error")
    self.assertEqual(task.message, '')
    # node2 only has eth0, so eth1/eth2 fall back to mac 'unknown'.
    error_nodes = [{'uid': node2.id, 'interface': 'eth0',
                    'name': node2.name,
                    'mac': node2.interfaces[0].mac,
                    'absent_vlans': nets_sent[0]['vlans']},
                   {'uid': node2.id, 'interface': 'eth1',
                    'name': node2.name, 'mac': 'unknown',
                    'absent_vlans': nets_sent[1]['vlans']},
                   {'uid': node2.id, 'interface': 'eth2',
                    'name': node2.name, 'mac': 'unknown',
                    'absent_vlans': nets_sent[2]['vlans']}
                   ]
    self.assertEqual(task.result, error_nodes)
def test_verify_networks_with_dhcp_subtask_erred(self):
    """Parent verify task inherits an errored check_dhcp subtask's message."""
    self.env.create(cluster_kwargs={},
                    nodes_kwargs=[{"api": False}, {"api": False}])
    cluster_db = self.env.clusters[0]
    node1, node2 = self.env.nodes
    nets_sent = [{'iface': 'eth0', 'vlans': range(100, 105)}]
    task = Task(name="verify_networks", cluster_id=cluster_db.id)
    task.cache = {
        "args": {
            'nodes': [{'uid': node1.id, 'networks': nets_sent},
                      {'uid': node2.id, 'networks': nets_sent}]
        }
    }
    self.db.add(task)
    self.db.commit()
    # Child dhcp check has already failed with its own message.
    dhcp_subtask = Task(name='check_dhcp',
                        cluster_id=cluster_db.id,
                        parent_id=task.id,
                        status='error',
                        message='DHCP ERROR')
    self.db.add(dhcp_subtask)
    self.db.commit()
    # node2 reports no networks, so it is the one expected in the result.
    kwargs = {
        'task_uuid': task.uuid,
        'status': 'ready',
        'nodes': [{'uid': node1.id, 'networks': nets_sent},
                  {'uid': node2.id, 'networks': []}]
    }
    self.receiver.verify_networks_resp(**kwargs)
    self.assertEqual(task.status, "error")
    self.assertEqual(task.message, u'DHCP ERROR')
    self.assertEqual(
        task.result,
        [{
            u'absent_vlans': [100, 101, 102, 103, 104],
            u'interface': 'eth0',
            u'mac': node2.interfaces[0].mac,
            u'name': 'Untitled ({0})'.format(node2.mac[-5:].lower()),
            u'uid': node2.id
        }])
def test_verify_networks_resp_incomplete_network_data_error(self):
    """A node returning no network data is reported per missing interface.

    Fix: flush the session after the receiver call and before refresh,
    matching the sibling receiver tests, so pending receiver-side
    changes are visible to the refreshed task.
    """
    # One node has single interface
    meta = self.env.default_metadata()
    mac = '02:07:43:78:4F:58'
    self.env.set_interfaces_in_meta(
        meta, [{'name': 'eth0', 'mac': mac}])
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[
            {"api": False, 'name': 'node1'},
            {"api": False, 'name': 'node2', 'meta': meta},
            {"api": False, 'name': 'node3'}
        ]
    )
    cluster_db = self.env.clusters[0]
    node1, node2, node3 = self.env.nodes
    nets_sent = [{'iface': 'eth0', 'vlans': range(100, 105)},
                 {'iface': 'eth1', 'vlans': [106]},
                 {'iface': 'eth2', 'vlans': [107]}]
    task = Task(
        name="super",
        cluster_id=cluster_db.id
    )
    task.cache = {
        "args": {
            'nodes': [{'uid': node1.id, 'networks': nets_sent},
                      {'uid': node2.id, 'networks': nets_sent},
                      {'uid': node3.id, 'networks': nets_sent}]
        }
    }
    self.db.add(task)
    self.db.commit()
    # node2 answers with no networks at all.
    kwargs = {'task_uuid': task.uuid,
              'status': 'ready',
              'nodes': [{'uid': node1.id, 'networks': nets_sent},
                        {'uid': node2.id, 'networks': []},
                        {'uid': node3.id, 'networks': nets_sent}]}
    self.receiver.verify_networks_resp(**kwargs)
    self.db.flush()
    self.db.refresh(task)
    self.assertEqual(task.status, "error")
    self.assertEqual(task.message, '')
    # node2 only has eth0, so eth1/eth2 fall back to mac 'unknown'.
    error_nodes = [{'uid': node2.id, 'interface': 'eth0',
                    'name': node2.name,
                    'mac': node2.interfaces[0].mac,
                    'absent_vlans': nets_sent[0]['vlans']},
                   {'uid': node2.id, 'interface': 'eth1',
                    'name': node2.name, 'mac': 'unknown',
                    'absent_vlans': nets_sent[1]['vlans']},
                   {'uid': node2.id, 'interface': 'eth2',
                    'name': node2.name, 'mac': 'unknown',
                    'absent_vlans': nets_sent[2]['vlans']}
                   ]
    self.assertEqual(task.result, error_nodes)
def test_verify_networks_resp_error(self):
    """Missing vlan 104 in the response errors the task for both nodes."""
    self.env.create(cluster_kwargs={},
                    nodes_kwargs=[{"api": False}, {"api": False}])
    cluster = self.env.clusters[0]
    node1, node2 = self.env.nodes
    sent = [{'iface': 'eth0', 'vlans': range(100, 105)}]
    received = [{'iface': 'eth0', 'vlans': range(100, 104)}]

    task = Task(name="super", cluster_id=cluster.id)
    task.cache = {"args": {'nodes': [
        {'uid': node1.id, 'networks': sent},
        {'uid': node2.id, 'networks': sent}]}}
    self.db.add(task)
    self.db.commit()

    self.receiver.verify_networks_resp(
        task_uuid=task.uuid,
        status='ready',
        nodes=[{'uid': node1.id, 'networks': received},
               {'uid': node2.id, 'networks': received}])
    self.db.flush()
    self.db.refresh(task)

    self.assertEqual(task.status, "error")
    expected = [{'uid': node.id,
                 'interface': 'eth0',
                 'name': node.name,
                 'absent_vlans': [104],
                 'mac': node.interfaces[0].mac}
                for node in self.env.nodes]
    self.assertEqual(task.message, '')
    self.assertEqual(task.result, expected)
def test_verify_networks_resp_error_with_removed_node(self):
    """A node removed mid-verification is still reported, with mac 'unknown'."""
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[
            {"api": False},
            {"api": False}
        ]
    )
    cluster_db = self.env.clusters[0]
    node1, node2 = self.env.nodes
    nets_sent = [{'iface': 'eth0', 'vlans': range(100, 105)}]
    nets_resp = [{'iface': 'eth0', 'vlans': range(100, 104)}]
    task = Task(
        name="super",
        cluster_id=cluster_db.id
    )
    task.cache = {
        "args": {
            'nodes': self.nodes_message((node1, node2), nets_sent),
            'offline': 0,
        }
    }
    self.db.add(task)
    self.db.commit()
    kwargs = {'task_uuid': task.uuid,
              'status': 'ready',
              'nodes': self.nodes_message((node1, node2), nets_resp)}
    # Delete node2 before processing the response.
    self.db.delete(node2)
    self.db.commit()
    self.receiver.verify_networks_resp(**kwargs)
    self.db.flush()
    # Fetch the task through the REST API rather than the session.
    resp = self.app.get(
        reverse('TaskHandler', kwargs={'obj_id': task.id}),
        headers=self.default_headers
    )
    self.assertEqual(resp.status_code, 200)
    task = resp.json_body
    self.assertEqual(task['status'], "error")
    # Deleted node2 has no interfaces left, hence mac 'unknown'.
    error_nodes = [{'uid': node1.id, 'interface': 'eth0',
                    'name': node1.name, 'absent_vlans': [104],
                    'mac': node1.interfaces[0].mac},
                   {'uid': node2.id, 'interface': 'eth0',
                    'name': node2.name, 'absent_vlans': [104],
                    'mac': 'unknown'}]
    self.assertEqual(task.get('message'), '')
    self.assertEqual(task['result'], error_nodes)
def test_verify_networks_resp_forgotten_node_error(self):
    """A node left out of the response errors the task; its name is in the message."""
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[{"api": False, 'name': 'node1'},
                      {"api": False, 'name': 'node2'},
                      {"api": False, 'name': 'node3'}])
    cluster = self.env.clusters[0]
    node1, node2, node3 = self.env.nodes
    networks = [{'iface': 'eth0', 'vlans': range(100, 105)}]

    task = Task(name="super", cluster_id=cluster.id)
    task.cache = {"args": {'nodes': [
        {'uid': node1.id, 'networks': networks},
        {'uid': node2.id, 'networks': networks},
        {'uid': node3.id, 'networks': networks}]}}
    self.db.add(task)
    self.db.commit()

    # node3 is deliberately absent from the response.
    self.receiver.verify_networks_resp(
        task_uuid=task.uuid,
        status='ready',
        nodes=[{'uid': node1.id, 'networks': networks},
               {'uid': node2.id, 'networks': networks}])
    self.db.flush()
    self.db.refresh(task)

    self.assertEqual(task.status, "error")
    self.assertRegexpMatches(task.message, node3.name)
    self.assertEqual(task.result, {})
def execute(self):
    """Start an environment update to the pending release.

    Raises InvalidReleaseId when no pending release is set and
    TaskAlreadyRunning when a conflicting task is in progress; otherwise
    creates an 'update' task, casts the message and returns the task.
    """
    if not self.cluster.pending_release_id:
        raise errors.InvalidReleaseId(
            u"Can't update environment '{0}' when "
            u"new release Id is invalid".format(self.cluster.name))
    # Refuse to start while any conflicting cluster-wide task is running.
    running_tasks = db().query(Task).filter_by(
        cluster_id=self.cluster.id,
        status='running'
    ).filter(
        Task.name.in_([
            'deploy',
            'deployment',
            'reset_environment',
            'stop_deployment'
        ])
    )
    if running_tasks.first():
        raise errors.TaskAlreadyRunning(
            u"Can't update environment '{0}' when "
            u"other task is running".format(
                self.cluster.id
            )
        )
    nodes_to_change = TaskHelper.nodes_to_upgrade(self.cluster)
    objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_change)
    logger.debug('Nodes to update: {0}'.format(
        ' '.join([n.fqdn for n in nodes_to_change])))
    task_update = Task(name='update', cluster=self.cluster)
    db().add(task_update)
    self.cluster.status = 'update'
    db().flush()
    deployment_message = self._call_silently(
        task_update,
        tasks.UpdateTask,
        nodes_to_change,
        method_name='message')
    db().refresh(task_update)
    task_update.cache = deployment_message
    for node in nodes_to_change:
        node.status = 'deploying'
        node.progress = 0
    db().commit()
    rpc.cast('naily', deployment_message)
    return task_update
def test_verify_networks_resp_without_vlans_only(self):
    """Verification passes when only vlan-less interfaces are configured."""
    self.env.create(cluster_kwargs={},
                    nodes_kwargs=[{"api": False}, {"api": False}])
    cluster = self.env.clusters[0]
    node1, node2 = self.env.nodes
    untagged = [{'iface': 'eth0', 'vlans': [0]},
                {'iface': 'eth1', 'vlans': [0]}]

    task = Task(name="super", cluster_id=cluster.id)
    task.cache = {"args": {'nodes': [
        {'uid': node1.id, 'networks': untagged},
        {'uid': node2.id, 'networks': untagged}]}}
    self.db.add(task)
    self.db.commit()

    self.receiver.verify_networks_resp(
        task_uuid=task.uuid,
        status='ready',
        nodes=[{'uid': node1.id, 'networks': untagged},
               {'uid': node2.id, 'networks': untagged}])
    self.db.flush()
    self.db.refresh(task)
    self.assertEqual(task.status, "ready")
def execute(self, nodes_to_provision, **kwargs): """Run provisioning task on specified nodes.""" # locking nodes nodes_ids = [node.id for node in nodes_to_provision] nodes = objects.NodeCollection.filter_by_list( None, 'id', nodes_ids, order_by='id' ) logger.debug('Nodes to provision: {0}'.format( ' '.join([objects.Node.get_node_fqdn(n) for n in nodes_to_provision]))) task_provision = Task(name=consts.TASK_NAMES.provision, status=consts.TASK_STATUSES.pending, cluster=self.cluster) db().add(task_provision) for node in nodes: objects.Node.reset_vms_created_state(node) db().commit() provision_message = self._call_silently( task_provision, tasks.ProvisionTask, nodes_to_provision, method_name='message' ) task_provision = objects.Task.get_by_uid( task_provision.id, fail_if_not_found=True, lock_for_update=True ) task_provision.cache = provision_message objects.NodeCollection.lock_for_update(nodes).all() for node in nodes_to_provision: node.pending_addition = False node.status = consts.NODE_STATUSES.provisioning node.progress = 0 db().commit() rpc.cast('naily', provision_message) return task_provision
def test_verify_networks_with_dhcp_subtask_erred(self):
    """An errored check_dhcp subtask surfaces its message on the parent task."""
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[
            {"api": False},
            {"api": False}
        ]
    )
    cluster_db = self.env.clusters[0]
    node1, node2 = self.env.nodes
    nets_sent = [{'iface': 'eth0', 'vlans': range(100, 105)}]
    task = Task(
        name="verify_networks",
        cluster_id=cluster_db.id
    )
    task.cache = {
        "args": {
            'nodes': self.nodes_message((node1, node2), nets_sent),
            'offline': 0,
        }
    }
    self.db.add(task)
    self.db.commit()
    # Child dhcp check already failed with its own message.
    dhcp_subtask = Task(
        name='check_dhcp',
        cluster_id=cluster_db.id,
        parent_id=task.id,
        status='error',
        message='DHCP ERROR'
    )
    self.db.add(dhcp_subtask)
    self.db.commit()
    # node1 gets full data back; node2 reports nothing.
    kwargs = {'task_uuid': task.uuid,
              'status': 'ready',
              'nodes': self.nodes_message((node1, node2), [])}
    kwargs['nodes'][0]['networks'] = nets_sent
    self.receiver.verify_networks_resp(**kwargs)
    self.assertEqual(task.status, "error")
    self.assertEqual(task.message, u'DHCP ERROR')
    # Normalize vlan ordering before the comparison.
    task.result[0]['absent_vlans'] = sorted(task.result[0]['absent_vlans'])
    self.assertEqual(task.result, [{
        u'absent_vlans': [100, 101, 102, 103, 104],
        u'interface': 'eth0',
        u'mac': node2.interfaces[0].mac,
        u'name': 'Untitled ({0})'.format(node2.mac[-5:].lower()),
        u'uid': node2.id}])
def test_verify_networks_with_dhcp_subtask(self):
    """verify_networks ends in error even when its dhcp subtask is
    ready, if the connectivity check itself errored.
    """
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[{"api": False}, {"api": False}],
    )
    cluster = self.env.clusters[0]
    first, second = self.env.nodes
    sent = [{'iface': 'eth0', 'vlans': range(100, 105)}]

    def node_msg(node, nets):
        return {'uid': node.id, 'networks': nets}

    verify_task = Task(name="verify_networks", cluster_id=cluster.id)
    verify_task.cache = {
        "args": {
            'nodes': [node_msg(first, sent), node_msg(second, sent)],
        }
    }
    self.db.add(verify_task)
    self.db.commit()

    self.db.add(Task(
        name='check_dhcp',
        cluster_id=cluster.id,
        parent_id=verify_task.id,
        status='ready',
    ))
    self.db.commit()

    self.receiver.verify_networks_resp(
        task_uuid=verify_task.uuid,
        status='ready',
        nodes=[node_msg(first, sent), node_msg(second, [])],
    )
    self.assertEqual(verify_task.status, "error")
def test_verify_networks_resp_without_vlans_only_erred(self):
    """Verification without vlans errs when the response is missing
    some of the info that was sent.
    """
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[{"api": False}, {"api": False}],
    )
    cluster = self.env.clusters[0]
    first, second = self.env.nodes
    sent = [{'iface': 'eth0', 'vlans': [0]}]
    received = [{'iface': 'eth0', 'vlans': []}]

    task = Task(name="super", cluster_id=cluster.id)
    task.cache = {
        "args": {
            'nodes': self.nodes_message((first, second), sent),
            'offline': 0,
        }
    }
    self.db.add(task)
    self.db.commit()

    self.receiver.verify_networks_resp(
        task_uuid=task.uuid,
        status='ready',
        nodes=self.nodes_message((first, second), received),
    )
    self.db.flush()
    self.db.refresh(task)

    self.assertEqual(task.status, "error")
    expected = [
        {'uid': node.id,
         'interface': 'eth0',
         'name': node.name,
         'mac': node.interfaces[0].mac,
         'absent_vlans': sent[0]['vlans']}
        for node in (first, second)
    ]
    self.assertEqual(task.result, expected)
def test_verify_networks_with_dhcp_subtask_erred(self):
    """The erred dhcp subtask's message propagates to the parent task."""
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[{"api": False}, {"api": False}],
    )
    cluster = self.env.clusters[0]
    first, second = self.env.nodes
    sent = [{'iface': 'eth0', 'vlans': range(100, 105)}]

    def node_msg(node, nets):
        return {'uid': node.id, 'networks': nets}

    verify_task = Task(name="verify_networks", cluster_id=cluster.id)
    verify_task.cache = {
        "args": {
            'nodes': [node_msg(first, sent), node_msg(second, sent)],
        }
    }
    self.db.add(verify_task)
    self.db.commit()

    self.db.add(Task(
        name='check_dhcp',
        cluster_id=cluster.id,
        parent_id=verify_task.id,
        status='error',
        message='DHCP ERROR',
    ))
    self.db.commit()

    self.receiver.verify_networks_resp(
        task_uuid=verify_task.uuid,
        status='ready',
        nodes=[node_msg(first, sent), node_msg(second, [])],
    )
    self.assertEqual(verify_task.status, "error")
    self.assertEqual(verify_task.message, u'DHCP ERROR')
    self.assertEqual(verify_task.result, [{
        u'absent_vlans': [100, 101, 102, 103, 104],
        u'interface': 'eth0',
        u'mac': second.interfaces[0].mac,
        u'name': None,
        u'uid': second.id,
    }])
def test_verify_networks_resp_partially_without_vlans(self):
    """Network verification partially without vlans passes."""
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[{"api": False}, {"api": False}],
    )
    cluster = self.env.clusters[0]
    first, second = self.env.nodes
    sent = [
        {"iface": "eth0", "vlans": [0]},
        {"iface": "eth1", "vlans": range(100, 104)},
    ]

    task = Task(name="super", cluster_id=cluster.id)
    task.cache = {
        "args": {
            "nodes": self.nodes_message((first, second), sent),
            "offline": 0,
        }
    }
    self.db.add(task)
    self.db.commit()

    self.receiver.verify_networks_resp(
        task_uuid=task.uuid,
        status="ready",
        nodes=self.nodes_message((first, second), sent),
    )
    self.db.flush()
    self.db.refresh(task)
    self.assertEqual(task.status, "ready")
def execute(self, nodes_to_provision):
    """Run provisioning task on specified nodes.

    Locks the node rows, creates a 'provision' Task, builds the
    provisioning RPC message, marks the nodes as provisioning and
    casts the message to the 'naily' queue.

    :param nodes_to_provision: list of node objects to provision
    :returns: the created Task instance
    """
    # locking nodes
    nodes_ids = [node.id for node in nodes_to_provision]
    nodes = objects.NodeCollection.filter_by_list(
        None, 'id', nodes_ids, order_by='id'
    )
    objects.NodeCollection.lock_for_update(nodes).all()
    objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
    logger.debug('Nodes to provision: {0}'.format(
        ' '.join([n.fqdn for n in nodes_to_provision])))
    task_provision = Task(name='provision')
    # node_ids is not used on successful installation, but it is
    # needed to handle installation failures (kept consistent with
    # the other provisioning flow in this file)
    task_provision.node_ids = nodes_ids
    db().add(task_provision)
    db().commit()
    provision_message = self._call_silently(
        task_provision,
        tasks.InstallosTask,
        nodes_to_provision,
        method_name='message'
    )
    # re-read the task with a row lock before mutating its cache
    task_provision = objects.Task.get_by_uid(
        task_provision.id,
        fail_if_not_found=True,
        lock_for_update=True
    )
    task_provision.cache = provision_message
    # lock node rows again before updating their status/progress
    objects.NodeCollection.lock_for_update(nodes).all()
    for node in nodes_to_provision:
        node.pending_addition = False
        node.status = NODE_STATUSES.provisioning
        node.progress = 0
    db().commit()
    rpc.cast('naily', provision_message)
    logger.info(u'消息发送完毕')
    return task_provision
def test_verify_networks_resp_incomplete_network_data_on_first_node(self):
    """Incomplete network data on the first node errs the task as
    expected instead of failing the verification outright.
    """
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[
            {"api": False, 'name': 'node1'},
            {"api": False, 'name': 'node2'},
        ],
    )
    cluster = self.env.clusters[0]
    first, second = self.env.nodes
    sent = [{'iface': 'eth0', 'vlans': range(100, 105)}]

    task = Task(name="super", cluster_id=cluster.id)
    task.cache = {
        "args": {
            'nodes': self.nodes_message((first, second), sent),
            'offline': 0,
        }
    }
    self.db.add(task)
    self.db.commit()

    resp_nodes = self.nodes_message((first, second), [])
    resp_nodes[1]['networks'] = sent
    self.receiver.verify_networks_resp(
        task_uuid=task.uuid, status='ready', nodes=resp_nodes)
    self.db.flush()
    self.db.refresh(task)

    self.assertEqual(task.status, "error")
    self.assertEqual(task.message, '')
    task.result[0]['absent_vlans'] = sorted(task.result[0]['absent_vlans'])
    self.assertEqual(task.result, [{
        'uid': first.id,
        'interface': 'eth0',
        'name': first.name,
        'mac': first.interfaces[0].mac,
        'absent_vlans': sorted(sent[0]['vlans']),
    }])
def execute(self, filters, force=False, graph_type=None, **kwargs):
    """Run a deployment task that re-applies configuration.

    :param filters: dict with optional 'node_ids' and 'node_role'
        keys used to select the nodes to update
    :param force: passed through to the deployment message builder
    :param graph_type: deployment graph type passed to the builder
    :returns: the created (possibly already completed) Task
    """
    # NOTE(review): presumably raises if a conflicting deployment
    # task is already running — confirm against check_running_task
    self.check_running_task(consts.TASK_NAMES.deployment)
    task = Task(name=consts.TASK_NAMES.deployment,
                cluster=self.cluster,
                status=consts.TASK_STATUSES.pending)
    db().add(task)
    nodes_to_update = objects.Cluster.get_nodes_to_update_config(
        self.cluster, filters.get('node_ids'), filters.get('node_role'))
    message = self._call_silently(
        task,
        self.get_deployment_task(),
        nodes_to_update,
        graph_type=graph_type,
        method_name='message',
        force=force
    )
    # locking task
    task = objects.Task.get_by_uid(
        task.id,
        fail_if_not_found=True,
        lock_for_update=True
    )
    # _call_silently may have already completed the task (e.g. on
    # error); nothing left to send in that case
    if task.is_completed():
        return task
    # locking nodes
    objects.NodeCollection.lock_nodes(nodes_to_update)
    # shallow copy so the cached message can carry its own 'nodes'
    # entry without mutating the RPC payload
    task.cache = copy.copy(message)
    task.cache['nodes'] = [n.id for n in nodes_to_update]
    for node in nodes_to_update:
        node.status = consts.NODE_STATUSES.deploying
        node.progress = 0
    db().commit()
    rpc.cast('naily', message)
    return task
def execute(self):
    """Run an environment update (release upgrade) task.

    :raises errors.InvalidReleaseId: if the cluster has no pending
        release id set
    :raises errors.TaskAlreadyRunning: if a deployment-family task
        is already running for this cluster
    :returns: the created 'update' Task
    """
    if not self.cluster.pending_release_id:
        raise errors.InvalidReleaseId(
            u"Can't update environment '{0}' when "
            u"new release Id is invalid".format(self.cluster.name))
    # refuse to start while any conflicting task is still running
    running_tasks = db().query(Task).filter_by(
        cluster_id=self.cluster.id,
        status='running'
    ).filter(
        Task.name.in_([
            'deploy',
            'deployment',
            'reset_environment',
            'stop_deployment'
        ])
    )
    if running_tasks.first():
        raise errors.TaskAlreadyRunning(
            u"Can't update environment '{0}' when "
            u"other task is running".format(self.cluster.id))
    nodes_to_change = TaskHelper.nodes_to_upgrade(self.cluster)
    objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_change)
    logger.debug('Nodes to update: {0}'.format(
        ' '.join([n.fqdn for n in nodes_to_change])))
    task_update = Task(name='update', cluster=self.cluster)
    db().add(task_update)
    self.cluster.status = 'update'
    db().flush()
    deployment_message = self._call_silently(
        task_update,
        tasks.UpdateTask,
        nodes_to_change,
        method_name='message')
    # refresh to pick up changes made by _call_silently
    db().refresh(task_update)
    task_update.cache = deployment_message
    for node in nodes_to_change:
        node.status = 'deploying'
        node.progress = 0
    db().commit()
    rpc.cast('naily', deployment_message)
    return task_update
def test_get_task_cache(self):
    """get_task_cache returns the stored cache while the row exists
    and an empty dict after the row has been deleted.
    """
    expected = {"key": "value"}
    task = Task()
    task.cache = expected
    self.db.add(task)
    self.db.flush()

    actual = TaskHelper.get_task_cache(task)
    self.assertDictEqual(expected, actual)

    # NOTE: We need to expire 'cache' attribute because otherwise
    # the 'task.cache' won't throw 'ObjectDeletedError' and
    # will be unable to test 'get_task_cache'.
    self.db.expire(task, ['cache'])

    task_from_db = objects.Task.get_by_uuid(task.uuid)
    self.db.delete(task_from_db)
    self.db.flush()

    expected = {}
    actual = TaskHelper.get_task_cache(task)
    self.assertDictEqual(expected, actual)
def test_verify_networks_resp(self):
    """A successful verification response marks the task ready."""
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[{"api": False}, {"api": False}],
    )
    cluster = self.env.clusters[0]
    first, second = self.env.nodes
    vlan_nets = [{"iface": "eth0", "vlans": range(100, 105)}]

    task = Task(name="verify_networks", cluster_id=cluster.id)
    task.cache = {
        "args": {
            "nodes": self.nodes_message((first, second), vlan_nets),
            "offline": 0,
        }
    }
    self.db.add(task)
    self.db.commit()

    self.receiver.verify_networks_resp(
        task_uuid=task.uuid,
        status="ready",
        nodes=self.nodes_message((first, second), vlan_nets),
    )
    self.db.flush()
    self.db.refresh(task)
    self.assertEqual(task.status, "ready")
    self.assertEqual(task.message, "")
def execute(self, filters, force=False, **kwargs):
    """Run a deployment task that re-applies configuration.

    :param filters: dict with optional 'node_ids' and 'node_role'
        keys used to select the nodes to update
    :param force: passed through to the deployment message builder
    :returns: the created (possibly already completed) Task
    """
    # NOTE(review): presumably raises if a conflicting deployment
    # task is already running — confirm against check_running_task
    self.check_running_task(consts.TASK_NAMES.deployment)
    task = Task(name=consts.TASK_NAMES.deployment,
                cluster=self.cluster,
                status=consts.TASK_STATUSES.pending)
    db().add(task)
    nodes_to_update = objects.Cluster.get_nodes_to_update_config(
        self.cluster, filters.get('node_ids'), filters.get('node_role'))
    message = self._call_silently(
        task,
        self.get_deployment_task(),
        nodes_to_update,
        method_name='message',
        force=force
    )
    # locking task
    task = objects.Task.get_by_uid(
        task.id,
        fail_if_not_found=True,
        lock_for_update=True
    )
    # _call_silently may have already completed the task (e.g. on
    # error); nothing left to send in that case
    if task.is_completed():
        return task
    # locking nodes
    objects.NodeCollection.lock_nodes(nodes_to_update)
    # shallow copy so the cached message can carry its own 'nodes'
    # entry without mutating the RPC payload
    task.cache = copy.copy(message)
    task.cache['nodes'] = [n.id for n in nodes_to_update]
    for node in nodes_to_update:
        node.status = consts.NODE_STATUSES.deploying
        node.progress = 0
    db().commit()
    rpc.cast('naily', message)
    return task
def execute(self, nodes_to_deployment, deployment_tasks=None,
            graph_type=None, force=False, **kwargs):
    """Run a deployment task on specified nodes.

    :param nodes_to_deployment: list of node objects to deploy
    :param deployment_tasks: optional list restricting which
        deployment tasks are run (defaults to all)
    :param graph_type: deployment graph type passed to the builder
    :param force: passed through to the deployment message builder
    :returns: the created Task instance
    """
    deployment_tasks = deployment_tasks or []
    logger.debug('Nodes to deploy: {0}'.format(
        ' '.join([objects.Node.get_node_fqdn(n)
                  for n in nodes_to_deployment])))
    task_deployment = Task(
        name=consts.TASK_NAMES.deployment,
        cluster=self.cluster,
        status=consts.TASK_STATUSES.pending
    )
    db().add(task_deployment)
    deployment_message = self._call_silently(
        task_deployment,
        self.get_deployment_task(),
        nodes_to_deployment,
        deployment_tasks=deployment_tasks,
        method_name='message',
        graph_type=graph_type,
        force=force)
    # refresh to pick up changes made by _call_silently
    db().refresh(task_deployment)
    # locking task
    task_deployment = objects.Task.get_by_uid(
        task_deployment.id,
        fail_if_not_found=True,
        lock_for_update=True
    )
    # locking nodes
    objects.NodeCollection.lock_nodes(nodes_to_deployment)
    task_deployment.cache = deployment_message
    for node in nodes_to_deployment:
        node.status = 'deploying'
        node.progress = 0
    db().commit()
    rpc.cast('naily', deployment_message)
    return task_deployment
def execute(self, nodes_to_provision): """Run provisioning task on specified nodes """ # locking nodes nodes_ids = [node.id for node in nodes_to_provision] nodes = objects.NodeCollection.filter_by_list(None, 'id', nodes_ids, order_by='id') objects.NodeCollection.lock_for_update(nodes).all() objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision) logger.debug('Nodes to provision: {0}'.format(' '.join( [n.fqdn for n in nodes_to_provision]))) task_provision = Task(name='provision') task_provision.node_ids = nodes_ids #node_ids参数在安装成功时候无用,但在安装失败的时候需要用到 db().add(task_provision) db().commit() provision_message = self._call_silently(task_provision, tasks.InstallosTask, nodes_to_provision, method_name='message') task_provision = objects.Task.get_by_uid(task_provision.id, fail_if_not_found=True, lock_for_update=True) task_provision.cache = provision_message objects.NodeCollection.lock_for_update(nodes).all() for node in nodes_to_provision: node.pending_addition = False node.status = NODE_STATUSES.provisioning node.progress = 0 db().commit() rpc.cast('naily', provision_message) logger.info(u'消息发送完毕') return task_provision
def test_verify_networks_error_and_notice_are_concatenated(self):
    """Custom error and the offline-nodes notice join with a newline."""
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[{"api": False}, {"api": False}],
    )
    cluster = self.env.clusters[0]
    first, second = self.env.nodes
    vlan_nets = [{'iface': 'eth0', 'vlans': range(100, 105)}]

    task = Task(name="verify_networks", cluster_id=cluster.id)
    task.cache = {
        "args": {
            "nodes": self.nodes_message((first, second), vlan_nets),
            "offline": 2,
        }
    }
    self.db.add(task)
    self.db.flush()

    custom_error = 'CustomError'
    self.receiver.verify_networks_resp(
        task_uuid=task.uuid,
        status='error',
        nodes=self.nodes_message((first, second), vlan_nets),
        error=custom_error,
    )
    self.db.flush()
    self.db.refresh(task)

    self.assertEqual(task.status, "error")
    offline_notice = 'Notice: 2 node(s) were offline during connectivity' \
                     ' check so they were skipped from the check.'
    self.assertEqual(task.message,
                     '\n'.join((custom_error, offline_notice)))
def execute(self, nodes_to_provision):
    """Run provisioning task on specified nodes.

    Constraints: currently this task cannot deploy RedHat.
    For redhat here should be added additional tasks e.g.
    check credentials, check licenses, redhat downloading.
    Status of this task you can track here:
    https://blueprints.launchpad.net/fuel/+spec
    /nailgun-separate-provisioning-for-redhat

    :param nodes_to_provision: list of node objects to provision
    :returns: the created Task instance
    """
    TaskHelper.update_slave_nodes_fqdn(nodes_to_provision)
    logger.debug('Nodes to provision: {0}'.format(
        ' '.join([n.fqdn for n in nodes_to_provision])))
    task_provision = Task(name='provision', cluster=self.cluster)
    db().add(task_provision)
    db().commit()
    provision_message = self._call_silently(
        task_provision,
        tasks.ProvisionTask,
        nodes_to_provision,
        method_name='message'
    )
    # refresh to pick up changes made by _call_silently
    db().refresh(task_provision)
    task_provision.cache = provision_message
    for node in nodes_to_provision:
        node.pending_addition = False
        node.status = 'provisioning'
        node.progress = 0
    db().commit()
    rpc.cast('naily', provision_message)
    return task_provision
def test_get_task_cache(self):
    """get_task_cache returns the stored cache while the row exists
    and an empty dict after it has been deleted.
    """
    stored = {"key": "value"}
    task = Task()
    task.cache = stored
    self.db.add(task)
    self.db.flush()

    self.assertDictEqual(stored, TaskHelper.get_task_cache(task))

    # NOTE: We need to expire 'cache' attribute because otherwise
    # the 'task.cache' won't throw 'ObjectDeletedError' and
    # will be unable to test 'get_task_cache'.
    self.db.expire(task, ['cache'])

    self.db.delete(objects.Task.get_by_uuid(task.uuid))
    self.db.flush()

    self.assertDictEqual({}, TaskHelper.get_task_cache(task))
def test_verify_networks_with_excluded_networks(self):
    """Network verification can exclude interfaces."""
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[{"api": False}, {"api": False}],
    )
    cluster = self.env.clusters[0]
    first, second = self.env.nodes
    sent = [
        {'iface': 'eth0', 'vlans': [0]},
        {'iface': 'eth1', 'vlans': range(100, 104)},
    ]
    excluded = [{'iface': 'eth3'}, {'iface': 'eth4'}]

    def node_msg(node):
        return {
            'uid': node.id,
            'name': node.name,
            'status': node.status,
            'networks': sent,
            'excluded_networks': excluded,
        }

    task = Task(name="super", cluster_id=cluster.id)
    task.cache = {
        "args": {
            'nodes': [node_msg(first), node_msg(second)],
            'offline': 0,
        }
    }
    self.db.add(task)
    self.db.commit()

    self.receiver.verify_networks_resp(
        task_uuid=task.uuid,
        status='ready',
        nodes=[node_msg(first), node_msg(second)],
    )
    self.db.flush()
    self.db.refresh(task)

    self.assertEqual(task.status, "ready")
    expected_message = 'Notice: some interfaces were skipped from' \
                       ' connectivity checking because this version' \
                       ' of Fuel cannot establish LACP on Bootstrap' \
                       ' nodes. Only interfaces of successfully' \
                       ' deployed nodes may be checked with LACP' \
                       ' enabled. The list of skipped interfaces:' \
                       ' node {0} [eth3, eth4], node {1} [eth3, eth4].' \
                       .format(first.name, second.name)
    self.assertEqual(task.message, expected_message)