Example #1
    def test_verify_networks_resp(self):
        self.env.create(
            cluster_kwargs={},
            nodes_kwargs=[
                {"api": False},
                {"api": False}
            ]
        )
        cluster_db = self.env.clusters[0]
        node1, node2 = self.env.nodes
        nets = [{'iface': 'eth0', 'vlans': range(100, 105)}]

        task = Task(
            name="verify_networks",
            cluster_id=cluster_db.id
        )
        task.cache = {
            "args": {
                "nodes": [{'uid': node1.id, 'networks': nets},
                          {'uid': node2.id, 'networks': nets}]
            }
        }
        self.db.add(task)
        self.db.commit()

        kwargs = {'task_uuid': task.uuid,
                  'status': 'ready',
                  'nodes': [{'uid': node1.id, 'networks': nets},
                            {'uid': node2.id, 'networks': nets}]}
        self.receiver.verify_networks_resp(**kwargs)
        self.db.refresh(task)
        self.assertEqual(task.status, "ready")
        self.assertEqual(task.message, '')
Example #2
    def test_verify_networks_resp_empty_nodes_custom_error(self):
        self.env.create(
            cluster_kwargs={},
            nodes_kwargs=[
                {"api": False},
                {"api": False}
            ]
        )
        cluster_db = self.env.clusters[0]
        node1, node2 = self.env.nodes
        nets_sent = [{'iface': 'eth0', 'vlans': range(100, 105)}]

        task = Task(
            name="super",
            cluster_id=cluster_db.id
        )
        task.cache = {
            "args": {
                'nodes': [{'uid': node1.id, 'networks': nets_sent},
                          {'uid': node2.id, 'networks': nets_sent}]
            }
        }
        self.db.add(task)
        self.db.commit()

        error_msg = 'Custom error message.'
        kwargs = {'task_uuid': task.uuid,
                  'status': 'ready',
                  'nodes': [],
                  'error': error_msg}
        self.receiver.verify_networks_resp(**kwargs)
        self.db.refresh(task)
        self.assertEqual(task.status, "error")
        self.assertEqual(task.message, error_msg)
Example #3
    def test_verify_networks_resp_partially_without_vlans(self):
        """Verify that network verification partially without vlans passes
        """
        self.env.create(
            cluster_kwargs={},
            nodes_kwargs=[
                {"api": False},
                {"api": False}
            ]
        )
        cluster_db = self.env.clusters[0]
        node1, node2 = self.env.nodes
        nets_sent = [{'iface': 'eth0', 'vlans': [0]},
                     {'iface': 'eth1', 'vlans': range(100, 104)}]

        task = Task(
            name="super",
            cluster_id=cluster_db.id
        )
        task.cache = {
            "args": {
                'nodes': [{'uid': node1.id, 'networks': nets_sent},
                          {'uid': node2.id, 'networks': nets_sent}]
            }
        }
        self.db.add(task)
        self.db.commit()

        kwargs = {'task_uuid': task.uuid,
                  'status': 'ready',
                  'nodes': [{'uid': node1.id, 'networks': nets_sent},
                            {'uid': node2.id, 'networks': nets_sent}]}
        self.receiver.verify_networks_resp(**kwargs)
        self.db.refresh(task)
        self.assertEqual(task.status, "ready")
Example #4
    def test_verify_networks_resp_forgotten_node_error(self):
        self.env.create(
            cluster_kwargs={},
            nodes_kwargs=[
                {"api": False, "name": "node1"},
                {"api": False, "name": "node2"},
                {"api": False, "name": "node3"},
            ],
        )
        cluster_db = self.env.clusters[0]
        node1, node2, node3 = self.env.nodes
        nets_sent = [{"iface": "eth0", "vlans": range(100, 105)}]

        task = Task(name="super", cluster_id=cluster_db.id)
        task.cache = {"args": {"nodes": self.nodes_message((node1, node2, node3), nets_sent), "offline": 0}}
        self.db.add(task)
        self.db.commit()

        kwargs = {"task_uuid": task.uuid, "status": "ready", "nodes": self.nodes_message((node1, node2), nets_sent)}
        self.receiver.verify_networks_resp(**kwargs)
        self.db.flush()
        self.db.refresh(task)
        self.assertEqual(task.status, "error")
        self.assertRegexpMatches(task.message, node3.name)
        self.assertEqual(task.result, {})
Example #5
    def execute(self, nodes_to_provision):
        """Run provisioning task on specified nodes
        """
        # locking nodes
        nodes_ids = [node.id for node in nodes_to_provision]
        nodes = objects.NodeCollection.filter_by_list(None, "id", nodes_ids, order_by="id")
        objects.NodeCollection.lock_for_update(nodes).all()

        objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
        logger.debug("Nodes to provision: {0}".format(" ".join([n.fqdn for n in nodes_to_provision])))

        task_provision = Task(name="provision")
        task_provision.node_ids = nodes_ids
        # node_ids is not needed when provisioning succeeds, but is required when it fails
        db().add(task_provision)
        db().commit()

        provision_message = self._call_silently(
            task_provision, tasks.InstallosTask, nodes_to_provision, method_name="message"
        )

        task_provision = objects.Task.get_by_uid(task_provision.id, fail_if_not_found=True, lock_for_update=True)
        task_provision.cache = provision_message
        objects.NodeCollection.lock_for_update(nodes).all()

        for node in nodes_to_provision:
            node.pending_addition = False
            node.status = NODE_STATUSES.provisioning
            node.progress = 0

        db().commit()

        rpc.cast("naily", provision_message)
        logger.info(u"消息发送完毕")
        return task_provision
Example #6
    def test_verify_networks_resp_error(self):
        self.env.create(cluster_kwargs={}, nodes_kwargs=[{"api": False}, {"api": False}])
        cluster_db = self.env.clusters[0]
        node1, node2 = self.env.nodes
        nets_sent = [{"iface": "eth0", "vlans": range(100, 105)}]
        nets_resp = [{"iface": "eth0", "vlans": range(100, 104)}]

        task = Task(name="super", cluster_id=cluster_db.id)
        task.cache = {"args": {"nodes": self.nodes_message((node1, node2), nets_sent), "offline": 0}}
        self.db.add(task)
        self.db.commit()

        kwargs = {"task_uuid": task.uuid, "status": "ready", "nodes": self.nodes_message((node1, node2), nets_resp)}
        self.receiver.verify_networks_resp(**kwargs)
        self.db.flush()
        self.db.refresh(task)
        self.assertEqual(task.status, "error")
        error_nodes = []
        for node in self.env.nodes:
            error_nodes.append(
                {
                    "uid": node.id,
                    "interface": "eth0",
                    "name": node.name,
                    "absent_vlans": [104],
                    "mac": node.interfaces[0].mac,
                }
            )
        self.assertEqual(task.message, "")
        self.assertEqual(task.result, error_nodes)
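
The error entries asserted in these tests come from comparing, per interface, the vlans each node was asked to probe against the vlans it actually reported back. A minimal sketch of that comparison (a hypothetical helper for illustration only, not the receiver's actual implementation):

def absent_vlans_per_iface(nets_sent, nets_resp):
    # Map interface -> set of vlans the node actually reported back.
    received = {net['iface']: set(net['vlans']) for net in nets_resp}
    # For every interface that was probed, collect the vlans that never came back.
    missing = {}
    for net in nets_sent:
        absent = set(net['vlans']) - received.get(net['iface'], set())
        if absent:
            missing[net['iface']] = sorted(absent)
    return missing

# With nets_sent carrying vlans 100-104 and nets_resp only 100-103,
# this yields {'eth0': [104]}, matching the absent_vlans asserted above.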
Example #7
    def test_verify_networks_error_and_notice_are_concatenated(self):
        self.env.create(cluster_kwargs={}, nodes_kwargs=[{"api": False}, {"api": False}])
        cluster_db = self.env.clusters[0]
        node1, node2 = self.env.nodes
        nets = [{"iface": "eth0", "vlans": range(100, 105)}]

        task = Task(name="verify_networks", cluster_id=cluster_db.id)
        task.cache = {"args": {"nodes": self.nodes_message((node1, node2), nets), "offline": 2}}
        self.db.add(task)
        self.db.flush()

        custom_error = "CustomError"
        kwargs = {
            "task_uuid": task.uuid,
            "status": "error",
            "nodes": self.nodes_message((node1, node2), nets),
            "error": custom_error,
        }

        self.receiver.verify_networks_resp(**kwargs)
        self.db.flush()
        self.db.refresh(task)
        self.assertEqual(task.status, "error")
        offline_notice = (
            "Notice: 2 node(s) were offline during connectivity" " check so they were skipped from the check."
        )
        self.assertEqual(task.message, "\n".join((custom_error, offline_notice)))
Example #8
    def test_proper_progress_calculation(self):
        supertask = Task(uuid=str(uuid.uuid4()), name="super", status="running")

        self.db.add(supertask)
        self.db.commit()

        subtask_weight = 0.4
        task_deletion = supertask.create_subtask("node_deletion", weight=subtask_weight)
        task_provision = supertask.create_subtask("provision", weight=subtask_weight)

        subtask_progress = random.randint(1, 20)

        deletion_kwargs = {"task_uuid": task_deletion.uuid, "progress": subtask_progress, "status": "running"}
        provision_kwargs = {"task_uuid": task_provision.uuid, "progress": subtask_progress, "status": "running"}

        self.receiver.provision_resp(**provision_kwargs)
        self.db.commit()
        self.receiver.remove_nodes_resp(**deletion_kwargs)
        self.db.commit()

        self.db.refresh(task_deletion)
        self.db.refresh(task_provision)
        self.db.refresh(supertask)

        calculated_progress = helpers.TaskHelper.calculate_parent_task_progress([task_deletion, task_provision])

        self.assertEqual(supertask.progress, calculated_progress)
Example #9
    def test_verify_networks_resp_incomplete_network_data_on_first_node(self):
        """First node network data incompletion causes task fail"""
        self.env.create(
            cluster_kwargs={}, nodes_kwargs=[{"api": False, "name": "node1"}, {"api": False, "name": "node2"}]
        )
        cluster_db = self.env.clusters[0]
        node1, node2 = self.env.nodes
        nets_sent = [{"iface": "eth0", "vlans": range(100, 105)}]

        task = Task(name="super", cluster_id=cluster_db.id)
        task.cache = {"args": {"nodes": self.nodes_message((node1, node2), nets_sent), "offline": 0}}

        self.db.add(task)
        self.db.commit()

        kwargs = {"task_uuid": task.uuid, "status": "ready", "nodes": self.nodes_message((node1, node2), [])}
        kwargs["nodes"][1]["networks"] = nets_sent
        self.receiver.verify_networks_resp(**kwargs)
        self.db.flush()
        self.db.refresh(task)
        self.assertEqual(task.status, "error")
        self.assertEqual(task.message, "")
        error_nodes = [
            {
                "uid": node1.id,
                "interface": "eth0",
                "name": node1.name,
                "mac": node1.interfaces[0].mac,
                "absent_vlans": sorted(nets_sent[0]["vlans"]),
            }
        ]
        task.result[0]["absent_vlans"] = sorted(task.result[0]["absent_vlans"])

        self.assertEqual(task.result, error_nodes)
Example #10
    def execute(self, nodes_to_deployment):
        # locking nodes for update
        objects.NodeCollection.lock_nodes(nodes_to_deployment)
        objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deployment)

        logger.debug("Nodes to deploy: {0}".format(" ".join([n.fqdn for n in nodes_to_deployment])))
        task_deployment = Task(name="deployment", cluster=self.cluster)
        db().add(task_deployment)

        deployment_message = self._call_silently(
            task_deployment, tasks.DeploymentTask, nodes_to_deployment, method_name="message"
        )

        db().refresh(task_deployment)

        # locking task
        task_deployment = objects.Task.get_by_uid(task_deployment.id, fail_if_not_found=True, lock_for_update=True)
        # locking nodes
        objects.NodeCollection.lock_nodes(nodes_to_deployment)

        task_deployment.cache = deployment_message

        for node in nodes_to_deployment:
            node.status = "deploying"
            node.progress = 0

        db().commit()

        rpc.cast("naily", deployment_message)

        return task_deployment
Example #11
    def test_verify_networks_with_dhcp_subtask_erred(self):
        self.env.create(cluster_kwargs={}, nodes_kwargs=[{"api": False}, {"api": False}])
        cluster_db = self.env.clusters[0]
        node1, node2 = self.env.nodes
        nets_sent = [{"iface": "eth0", "vlans": range(100, 105)}]

        task = Task(name="verify_networks", cluster_id=cluster_db.id)
        task.cache = {"args": {"nodes": self.nodes_message((node1, node2), nets_sent), "offline": 0}}
        self.db.add(task)
        self.db.commit()
        dhcp_subtask = Task(
            name="check_dhcp", cluster_id=cluster_db.id, parent_id=task.id, status="error", message="DHCP ERROR"
        )
        self.db.add(dhcp_subtask)
        self.db.commit()
        kwargs = {"task_uuid": task.uuid, "status": "ready", "nodes": self.nodes_message((node1, node2), [])}
        kwargs["nodes"][0]["networks"] = nets_sent
        self.receiver.verify_networks_resp(**kwargs)

        self.assertEqual(task.status, "error")
        self.assertEqual(task.message, u"DHCP ERROR")

        task.result[0]["absent_vlans"] = sorted(task.result[0]["absent_vlans"])
        self.assertEqual(
            task.result,
            [
                {
                    u"absent_vlans": [100, 101, 102, 103, 104],
                    u"interface": "eth0",
                    u"mac": node2.interfaces[0].mac,
                    u"name": "Untitled ({0})".format(node2.mac[-5:].lower()),
                    u"uid": node2.id,
                }
            ],
        )
Example #12
    def test_task_contains_field_parent(self):
        parent_task = Task(
            name=consts.TASK_NAMES.deployment,
            cluster=self.cluster_db,
            status=consts.TASK_STATUSES.running,
            progress=10
        )
        child_task = parent_task.create_subtask(
            name=consts.TASK_NAMES.deployment,
            status=consts.TASK_STATUSES.running,
            progress=10
        )

        cluster_tasks = self.app.get(
            reverse(
                'TaskCollectionHandler',
                kwargs={'cluster_id': self.cluster_db.id}
            ),
            headers=self.default_headers
        ).json_body

        child_task_data = next(
            t for t in cluster_tasks if t['id'] == child_task.id
        )

        self.assertEqual(parent_task.id, child_task_data['parent_id'])
        parent_task_data = next(
            t for t in cluster_tasks if t['id'] == parent_task.id
        )
        self.assertIsNone(parent_task_data['parent_id'])
Example #13
    def test_verify_networks_resp_without_vlans_only(self):
        """Verify that network verification without vlans passes
        when there only iface without vlans configured
        """
        self.env.create(
            cluster_kwargs={},
            nodes_kwargs=[
                {"api": False},
                {"api": False}
            ]
        )
        cluster_db = self.env.clusters[0]
        node1, node2 = self.env.nodes
        nets_sent = [{'iface': 'eth0', 'vlans': [0]},
                     {'iface': 'eth1', 'vlans': [0]}]

        task = Task(
            name="super",
            cluster_id=cluster_db.id
        )
        task.cache = {
            "args": {
                'nodes': self.nodes_message((node1, node2), nets_sent)
            }
        }
        self.db.add(task)
        self.db.commit()

        kwargs = {'task_uuid': task.uuid,
                  'status': 'ready',
                  'nodes': self.nodes_message((node1, node2), nets_sent)}
        self.receiver.verify_networks_resp(**kwargs)
        self.db.flush()
        self.db.refresh(task)
        self.assertEqual(task.status, "ready")
Example #14
    def test_verify_networks_resp_error_with_removed_node(self):
        self.env.create(cluster_kwargs={}, nodes_kwargs=[{"api": False}, {"api": False}])

        cluster_db = self.env.clusters[0]
        node1, node2 = self.env.nodes
        nets_sent = [{"iface": "eth0", "vlans": range(100, 105)}]
        nets_resp = [{"iface": "eth0", "vlans": range(100, 104)}]

        task = Task(name="super", cluster_id=cluster_db.id)
        task.cache = {"args": {"nodes": self.nodes_message((node1, node2), nets_sent), "offline": 0}}
        self.db.add(task)
        self.db.commit()

        kwargs = {"task_uuid": task.uuid, "status": "ready", "nodes": self.nodes_message((node1, node2), nets_resp)}
        self.db.delete(node2)
        self.db.commit()
        self.receiver.verify_networks_resp(**kwargs)
        self.db.flush()
        resp = self.app.get(reverse("TaskHandler", kwargs={"obj_id": task.id}), headers=self.default_headers)
        self.assertEqual(resp.status_code, 200)
        task = resp.json_body
        self.assertEqual(task["status"], "error")
        error_nodes = [
            {
                "uid": node1.id,
                "interface": "eth0",
                "name": node1.name,
                "absent_vlans": [104],
                "mac": node1.interfaces[0].mac,
            },
            {"uid": node2.id, "interface": "eth0", "name": node2.name, "absent_vlans": [104], "mac": "unknown"},
        ]
        self.assertEqual(task.get("message"), "")
        self.assertEqual(task["result"], error_nodes)
Example #15
    def test_verify_networks_resp_forgotten_node_error(self):
        self.env.create(
            cluster_kwargs={},
            nodes_kwargs=[
                {"api": False, 'name': 'node1'},
                {"api": False, 'name': 'node2'},
                {"api": False, 'name': 'node3'}
            ]
        )
        cluster_db = self.env.clusters[0]
        node1, node2, node3 = self.env.nodes
        nets_sent = [{'iface': 'eth0', 'vlans': range(100, 105)}]

        task = Task(
            name="super",
            cluster_id=cluster_db.id
        )
        task.cache = {
            "args": {
                'nodes': self.nodes_message((node1, node2, node3), nets_sent)
            }
        }
        self.db.add(task)
        self.db.commit()

        kwargs = {'task_uuid': task.uuid,
                  'status': 'ready',
                  'nodes': self.nodes_message((node1, node2),
                                              nets_sent)}
        self.receiver.verify_networks_resp(**kwargs)
        self.db.flush()
        self.db.refresh(task)
        self.assertEqual(task.status, "error")
        self.assertRegexpMatches(task.message, node3.name)
        self.assertEqual(task.result, {})
Example #16
    def execute(self, nodes_to_provision):
        """Run provisioning task on specified nodes
        """
        objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)

        logger.debug('Nodes to provision: {0}'.format(
            ' '.join([n.fqdn for n in nodes_to_provision])))

        task_provision = Task(name='provision', cluster=self.cluster)
        db().add(task_provision)
        db().commit()

        provision_message = self._call_silently(
            task_provision,
            tasks.ProvisionTask,
            nodes_to_provision,
            method_name='message'
        )
        db().refresh(task_provision)

        task_provision.cache = provision_message

        for node in nodes_to_provision:
            node.pending_addition = False
            node.status = 'provisioning'
            node.progress = 0

        db().commit()

        rpc.cast('naily', provision_message)

        return task_provision
Example #17
    def execute(self, nodes_to_deployment):
        TaskHelper.update_slave_nodes_fqdn(nodes_to_deployment)
        logger.debug('Nodes to deploy: {0}'.format(
            ' '.join([n.fqdn for n in nodes_to_deployment])))
        task_deployment = Task(name='deployment', cluster=self.cluster)
        db().add(task_deployment)
        db().commit()

        deployment_message = self._call_silently(
            task_deployment,
            tasks.DeploymentTask,
            nodes_to_deployment,
            method_name='message')

        db().refresh(task_deployment)

        task_deployment.cache = deployment_message

        for node in nodes_to_deployment:
            node.status = 'deploying'
            node.progress = 0

        db().commit()
        rpc.cast('naily', deployment_message)

        return task_deployment
Example #18
    def test_verify_networks_resp_extra_nodes_error(self):
        self.env.create(
            cluster_kwargs={},
            nodes_kwargs=[
                {"api": False},
                {"api": False}
            ]
        )
        cluster_db = self.env.clusters[0]
        node1, node2 = self.env.nodes
        node3 = self.env.create_node(api=False)
        nets_sent = [{'iface': 'eth0', 'vlans': range(100, 105)}]

        task = Task(
            name="super",
            cluster_id=cluster_db.id
        )
        task.cache = {
            "args": {
                'nodes': self.nodes_message((node1, node2), nets_sent),
                'offline': 0,
            }
        }
        self.db.add(task)
        self.db.commit()

        kwargs = {'task_uuid': task.uuid,
                  'status': 'ready',
                  'nodes': self.nodes_message((node1, node2, node3),
                                              nets_sent)}
        self.receiver.verify_networks_resp(**kwargs)
        self.db.flush()
        self.db.refresh(task)
        self.assertEqual(task.status, "ready")
        self.assertEqual(task.message, '')
Example #19
    def test_verify_networks_resp_without_vlans_only_erred(self):
        """Net verification without vlans fails when not all info received"""
        self.env.create(cluster_kwargs={}, nodes_kwargs=[{"api": False}, {"api": False}])
        cluster_db = self.env.clusters[0]
        node1, node2 = self.env.nodes
        nets_sent = [{"iface": "eth0", "vlans": [0]}]
        nets_resp = [{"iface": "eth0", "vlans": []}]

        task = Task(name="super", cluster_id=cluster_db.id)
        task.cache = {"args": {"nodes": self.nodes_message((node1, node2), nets_sent), "offline": 0}}
        self.db.add(task)
        self.db.commit()

        kwargs = {"task_uuid": task.uuid, "status": "ready", "nodes": self.nodes_message((node1, node2), nets_resp)}
        self.receiver.verify_networks_resp(**kwargs)
        self.db.flush()
        self.db.refresh(task)
        self.assertEqual(task.status, "error")
        error_nodes = [
            {
                "uid": node1.id,
                "interface": "eth0",
                "name": node1.name,
                "mac": node1.interfaces[0].mac,
                "absent_vlans": nets_sent[0]["vlans"],
            },
            {
                "uid": node2.id,
                "interface": "eth0",
                "name": node2.name,
                "mac": node2.interfaces[0].mac,
                "absent_vlans": nets_sent[0]["vlans"],
            },
        ]
        self.assertEqual(task.result, error_nodes)
Example #20
    def test_node_deletion_subtask_progress(self):
        supertask = Task(
            uuid=str(uuid.uuid4()),
            name="super",
            status="running"
        )

        self.db.add(supertask)
        self.db.commit()

        task_deletion = supertask.create_subtask("node_deletion")
        task_provision = supertask.create_subtask("provision", weight=0.4)

        subtask_progress = random.randint(1, 20)

        deletion_kwargs = {'task_uuid': task_deletion.uuid,
                           'progress': subtask_progress}
        provision_kwargs = {'task_uuid': task_provision.uuid,
                            'progress': subtask_progress}

        def progress_difference():
            self.receiver.provision_resp(**provision_kwargs)

            self.db.refresh(task_provision)
            self.assertEqual(task_provision.progress, subtask_progress)

            self.db.refresh(supertask)
            progress_before_delete_subtask = supertask.progress

            self.receiver.remove_nodes_resp(**deletion_kwargs)

            self.db.refresh(task_deletion)
            self.assertEqual(task_deletion.progress, subtask_progress)

            self.db.refresh(supertask)
            progress_after_delete_subtask = supertask.progress

            return abs(progress_after_delete_subtask -
                       progress_before_delete_subtask)

        without_coeff = progress_difference()

        task_deletion.progress = 0
        task_deletion.weight = 0.5
        self.db.merge(task_deletion)

        task_provision.progress = 0
        self.db.merge(task_provision)

        supertask.progress = 0
        self.db.merge(supertask)

        self.db.commit()

        with_coeff = progress_difference()

        # some freaking magic is here but haven't found
        # better way to test what is already working
        self.assertTrue((without_coeff / with_coeff) < 2)
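
The ratio checked above depends on how subtask weights feed the parent task's progress. A back-of-the-envelope version of that weighted average (an assumption for illustration; TaskHelper's real calculation may differ):

def weighted_parent_progress(subtasks):
    # Each subtask contributes its progress scaled by its weight;
    # the parent's progress is the weight-normalized sum.
    total_weight = sum(t.weight for t in subtasks) or 1
    return int(sum(t.progress * t.weight for t in subtasks) / total_weight)

# In the test above, changing task_deletion.weight changes how much its
# progress moves the supertask, which is what the
# without_coeff / with_coeff ratio probes.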
Example #21
    def test_verify_networks_resp_incomplete_network_data_error(self):
        # One node has single interface
        meta = self.env.default_metadata()
        mac = '02:07:43:78:4F:58'
        self.env.set_interfaces_in_meta(
            meta, [{'name': 'eth0', 'mac': mac}])

        self.env.create(
            cluster_kwargs={},
            nodes_kwargs=[
                {"api": False, 'name': 'node1'},
                {"api": False, 'name': 'node2', 'meta': meta},
                {"api": False, 'name': 'node3'}
            ]
        )
        cluster_db = self.env.clusters[0]
        node1, node2, node3 = self.env.nodes

        nets_sent = [{'iface': 'eth0', 'vlans': range(100, 105)},
                     {'iface': 'eth1', 'vlans': [106]},
                     {'iface': 'eth2', 'vlans': [107]}]

        task = Task(
            name="super",
            cluster_id=cluster_db.id
        )
        task.cache = {
            "args": {
                'nodes': [{'uid': node1.id, 'networks': nets_sent},
                          {'uid': node2.id, 'networks': nets_sent},
                          {'uid': node3.id, 'networks': nets_sent}]
            }
        }
        self.db.add(task)
        self.db.commit()

        kwargs = {'task_uuid': task.uuid,
                  'status': 'ready',
                  'nodes': [{'uid': node1.id, 'networks': nets_sent},
                            {'uid': node2.id, 'networks': []},
                            {'uid': node3.id, 'networks': nets_sent}]}
        self.receiver.verify_networks_resp(**kwargs)
        self.db.flush()
        self.db.refresh(task)
        self.assertEqual(task.status, "error")
        self.assertEqual(task.message, '')
        error_nodes = [{'uid': node2.id, 'interface': 'eth0',
                        'name': node2.name, 'mac': node2.interfaces[0].mac,
                        'absent_vlans': nets_sent[0]['vlans']},
                       {'uid': node2.id, 'interface': 'eth1',
                        'name': node2.name, 'mac': 'unknown',
                        'absent_vlans': nets_sent[1]['vlans']},
                       {'uid': node2.id, 'interface': 'eth2',
                        'name': node2.name, 'mac': 'unknown',
                        'absent_vlans': nets_sent[2]['vlans']}
                       ]

        self.assertEqual(task.result, error_nodes)
Example #22
    def execute(self, force=False, **kwargs):
        try:
            self.clear_tasks_history(force=force)
        except errors.TaskAlreadyRunning:
            raise errors.DeploymentAlreadyStarted(
                "Can't reset environment '{0}' when "
                "running deployment task exists.".format(
                    self.cluster.id
                )
            )

        # FIXME(aroma): remove updating of 'deployed_before'
        # when stop action is reworked. 'deployed_before'
        # flag identifies whether stop action is allowed for the
        # cluster. Please, refer to [1] for more details.
        # [1]: https://bugs.launchpad.net/fuel/+bug/1529691
        objects.Cluster.set_deployed_before_flag(self.cluster, value=False)

        nodes = objects.Cluster.get_nodes_by_role(
            self.cluster, consts.VIRTUAL_NODE_TYPES.virt
        )
        for node in nodes:
            objects.Node.reset_vms_created_state(node)

        objects.ClusterPluginLinkCollection.delete_by_cluster_id(
            self.cluster.id)

        db().commit()

        supertask = Task(
            name=consts.TASK_NAMES.reset_environment,
            cluster=self.cluster
        )
        db().add(supertask)
        al = TaskHelper.create_action_log(supertask)

        reset_nodes = supertask.create_subtask(
            consts.TASK_NAMES.reset_nodes
        )

        remove_keys_task = supertask.create_subtask(
            consts.TASK_NAMES.remove_keys
        )

        remove_ironic_bootstrap_task = supertask.create_subtask(
            consts.TASK_NAMES.remove_ironic_bootstrap
        )

        db().commit()

        rpc.cast('naily', [
            tasks.ResetEnvironmentTask.message(reset_nodes),
            tasks.RemoveIronicBootstrap.message(remove_ironic_bootstrap_task),
            tasks.RemoveClusterKeys.message(remove_keys_task)
        ])
        TaskHelper.update_action_log(supertask, al)
        return supertask
Example #23
    def execute(self, nets, vlan_ids):
        self.remove_previous_task()

        task = Task(
            name="check_networks",
            cluster=self.cluster
        )

        if len(self.cluster.nodes) < 2:
            task.status = 'error'
            task.progress = 100
            task.message = ('At least two nodes are required to be '
                            'in the environment for network verification.')
            db().add(task)
            db().commit()
            return task

        db().add(task)
        db().commit()

        self._call_silently(
            task,
            tasks.CheckNetworksTask,
            data=nets,
            check_admin_untagged=True
        )
        db().refresh(task)

        if task.status != 'error':
            # this one is connected with UI issues - we need to
            # separate if error happened inside nailgun or somewhere
            # in the orchestrator, and UI does it by task name.
            task.name = 'verify_networks'

            dhcp_subtask = objects.task.Task.create_subtask(
                task, name='check_dhcp',)

            multicast = objects.task.Task.create_subtask(
                task, name='multicast_verification')

            corosync = self.cluster.attributes['editable']['corosync']
            group = corosync['group']['value']
            port = corosync['port']['value']
            conf = {'group': group, 'port': port}

            verify_task = tasks.VerifyNetworksTask(task, vlan_ids)
            verify_task.add_subtask(tasks.CheckDhcpTask(dhcp_subtask,
                                                        vlan_ids))
            verify_task.add_subtask(
                tasks.MulticastVerificationTask(multicast, conf))

            self._call_silently(task, verify_task)

        return task
Example #24
    def test_verify_networks_resp_error_with_removed_node(self):
        self.env.create(
            cluster_kwargs={},
            nodes_kwargs=[
                {"api": False},
                {"api": False}
            ]
        )

        cluster_db = self.env.clusters[0]
        node1, node2 = self.env.nodes
        nets_sent = [{'iface': 'eth0', 'vlans': range(100, 105)}]
        nets_resp = [{'iface': 'eth0', 'vlans': range(100, 104)}]

        task = Task(
            name="super",
            cluster_id=cluster_db.id
        )
        task.cache = {
            "args": {
                'nodes': self.nodes_message((node1, node2), nets_sent),
                'offline': 0,
            }
        }
        self.db.add(task)
        self.db.commit()

        kwargs = {'task_uuid': task.uuid,
                  'status': 'ready',
                  'nodes': self.nodes_message((node1, node2), nets_resp)}
        self.db.delete(node2)
        self.db.commit()
        self.receiver.verify_networks_resp(**kwargs)
        self.db.flush()
        resp = self.app.get(
            reverse('TaskHandler', kwargs={'obj_id': task.id}),
            headers=self.default_headers
        )
        self.assertEqual(resp.status_code, 200)
        task = resp.json_body
        self.assertEqual(task['status'], "error")
        error_nodes = [{'uid': node1.id,
                        'interface': 'eth0',
                        'name': node1.name,
                        'absent_vlans': [104],
                        'mac': node1.interfaces[0].mac},
                       {'uid': node2.id,
                        'interface': 'eth0',
                        'name': node2.name,
                        'absent_vlans': [104],
                        'mac': 'unknown'}]
        self.assertEqual(task.get('message'), '')
        self.assertEqual(task['result'], error_nodes)
Example #25
    def execute(self):
        deploy_running = db().query(Task).filter_by(
            cluster=self.cluster,
            name=consts.TASK_NAMES.deploy,
            status='running'
        ).first()
        if deploy_running:
            raise errors.DeploymentAlreadyStarted(
                u"Can't reset environment '{0}' when "
                u"deployment is running".format(
                    self.cluster.id
                )
            )

        obsolete_tasks = db().query(Task).filter_by(
            cluster_id=self.cluster.id,
        ).filter(
            Task.name.in_([
                consts.TASK_NAMES.deploy,
                consts.TASK_NAMES.deployment,
                consts.TASK_NAMES.stop_deployment
            ])
        )

        for task in obsolete_tasks:
            db().delete(task)

        nodes = objects.Cluster.get_nodes_by_role(
            self.cluster, consts.VIRTUAL_NODE_TYPES.virt)
        for node in nodes:
            objects.Node.reset_vms_created_state(node)

        db().commit()

        supertask = Task(
            name=consts.TASK_NAMES.reset_environment,
            cluster=self.cluster
        )
        db().add(supertask)
        al = TaskHelper.create_action_log(supertask)

        remove_keys_task = supertask.create_subtask(
            consts.TASK_NAMES.reset_environment
        )

        db().commit()

        rpc.cast('naily', [
            tasks.ResetEnvironmentTask.message(supertask),
            tasks.RemoveClusterKeys.message(remove_keys_task)
        ])
        TaskHelper.update_action_log(supertask, al)
        return supertask
Example #26
    def execute(self):
        if not self.cluster.pending_release_id:
            raise errors.InvalidReleaseId(
                u"Can't update environment '{0}' when "
                u"new release Id is invalid".format(self.cluster.name))

        running_tasks = db().query(Task).filter_by(
            cluster_id=self.cluster.id,
            status='running'
        ).filter(
            Task.name.in_([
                'deploy',
                'deployment',
                'reset_environment',
                'stop_deployment'
            ])
        )
        if running_tasks.first():
            raise errors.TaskAlreadyRunning(
                u"Can't update environment '{0}' when "
                u"other task is running".format(
                    self.cluster.id
                )
            )

        nodes_to_change = TaskHelper.nodes_to_upgrade(self.cluster)
        objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_change)
        logger.debug('Nodes to update: {0}'.format(
            ' '.join([n.fqdn for n in nodes_to_change])))
        task_update = Task(name='update', cluster=self.cluster)
        db().add(task_update)
        self.cluster.status = 'update'
        db().flush()

        deployment_message = self._call_silently(
            task_update,
            tasks.UpdateTask,
            nodes_to_change,
            method_name='message')

        db().refresh(task_update)

        task_update.cache = deployment_message

        for node in nodes_to_change:
            node.status = 'deploying'
            node.progress = 0

        db().commit()
        rpc.cast('naily', deployment_message)

        return task_update
Example #27
    def execute(self, nets, vlan_ids):
        self.remove_previous_task()

        task = Task(
            name=TASK_NAMES.check_networks,
            cluster=self.cluster
        )

        if len(self.cluster.nodes) < 2:
            task.status = TASK_STATUSES.error
            task.progress = 100
            task.message = ('At least two nodes are required to be '
                            'in the environment for network verification.')
            db().add(task)
            db().commit()
            return task

        db().add(task)
        db().commit()

        self._call_silently(
            task,
            tasks.CheckNetworksTask,
            data=nets,
            check_admin_untagged=True
        )
        db().refresh(task)

        if task.status != TASK_STATUSES.error:
            # this one is connected with UI issues - we need to
            # separate if error happened inside nailgun or somewhere
            # in the orchestrator, and UI does it by task name.
            task.name = TASK_NAMES.verify_networks
            verify_task = tasks.VerifyNetworksTask(task, vlan_ids)

            if tasks.CheckDhcpTask.enabled(self.cluster):
                dhcp_subtask = objects.task.Task.create_subtask(
                    task, name=TASK_NAMES.check_dhcp)
                verify_task.add_subtask(tasks.CheckDhcpTask(
                    dhcp_subtask, vlan_ids))

            if tasks.MulticastVerificationTask.enabled(self.cluster):
                multicast = objects.task.Task.create_subtask(
                    task, name=TASK_NAMES.multicast_verification)
                verify_task.add_subtask(
                    tasks.MulticastVerificationTask(multicast))

            db().commit()
            self._call_silently(task, verify_task)

        return task
Example #28
    def execute(self, nets, vlan_ids):
        self.remove_previous_task()

        task = Task(
            name="check_networks",
            cluster=self.cluster
        )

        if len(self.cluster.nodes) < 2:
            task.status = 'error'
            task.progress = 100
            task.message = ('At least two nodes are required to be '
                            'in the environment for network verification.')
            db().add(task)
            db().commit()
            return task

        db().add(task)
        db().commit()

        self._call_silently(
            task,
            tasks.CheckNetworksTask,
            data=nets,
            check_admin_untagged=True
        )
        db().refresh(task)

        if task.status != 'error':
            # this one is connected with UI issues - we need to
            # separate if error happened inside nailgun or somewhere
            # in the orchestrator, and UI does it by task name.

            dhcp_subtask = Task(
                name='check_dhcp',
                cluster=self.cluster,
                parent_id=task.id)
            db().add(dhcp_subtask)
            db().commit()
            db().refresh(task)

            task.name = 'verify_networks'

            self._call_silently(
                task,
                tasks.VerifyNetworksTask,
                vlan_ids
            )

        return task
Example #29
    def execute(self, nodes_to_provision, **kwargs):
        """Run provisioning task on specified nodes."""
        # locking nodes
        nodes_ids = [node.id for node in nodes_to_provision]
        nodes = objects.NodeCollection.filter_by_list(
            None,
            'id',
            nodes_ids,
            order_by='id'
        )

        logger.debug('Nodes to provision: {0}'.format(
            ' '.join([objects.Node.get_node_fqdn(n)
                      for n in nodes_to_provision])))

        task_provision = Task(name=consts.TASK_NAMES.provision,
                              status=consts.TASK_STATUSES.pending,
                              cluster=self.cluster)
        db().add(task_provision)

        for node in nodes:
            objects.Node.reset_vms_created_state(node)

        db().commit()

        provision_message = self._call_silently(
            task_provision,
            tasks.ProvisionTask,
            nodes_to_provision,
            method_name='message'
        )

        task_provision = objects.Task.get_by_uid(
            task_provision.id,
            fail_if_not_found=True,
            lock_for_update=True
        )
        task_provision.cache = provision_message
        objects.NodeCollection.lock_for_update(nodes).all()

        for node in nodes_to_provision:
            node.pending_addition = False
            node.status = consts.NODE_STATUSES.provisioning
            node.progress = 0

        db().commit()

        rpc.cast('naily', provision_message)

        return task_provision
Example #30
    def test_verify_networks_with_dhcp_subtask_erred(self):
        self.env.create(
            cluster_kwargs={},
            nodes_kwargs=[
                {"api": False},
                {"api": False}
            ]
        )
        cluster_db = self.env.clusters[0]
        node1, node2 = self.env.nodes
        nets_sent = [{'iface': 'eth0', 'vlans': range(100, 105)}]

        task = Task(
            name="verify_networks",
            cluster_id=cluster_db.id
        )
        task.cache = {
            "args": {
                'nodes': self.nodes_message((node1, node2), nets_sent),
                'offline': 0,
            }
        }
        self.db.add(task)
        self.db.commit()
        dhcp_subtask = Task(
            name='check_dhcp',
            cluster_id=cluster_db.id,
            parent_id=task.id,
            status='error',
            message='DHCP ERROR'
        )
        self.db.add(dhcp_subtask)
        self.db.commit()
        kwargs = {'task_uuid': task.uuid,
                  'status': 'ready',
                  'nodes': self.nodes_message((node1, node2), [])}
        kwargs['nodes'][0]['networks'] = nets_sent
        self.receiver.verify_networks_resp(**kwargs)

        self.assertEqual(task.status, "error")
        self.assertEqual(task.message, u'DHCP ERROR')

        task.result[0]['absent_vlans'] = sorted(task.result[0]['absent_vlans'])
        self.assertEqual(task.result, [{
            u'absent_vlans': [100, 101, 102, 103, 104],
            u'interface': 'eth0',
            u'mac': node2.interfaces[0].mac,
            u'name': 'Untitled ({0})'.format(node2.mac[-5:].lower()),
            u'uid': node2.id}])
Example #31
File: manager.py  Project: tsipa/fuel-web
    def execute(self):
        logger.info("Trying to start capacity_log task")
        self.check_running_task('capacity_log')

        task = Task(name='capacity_log')
        db().add(task)
        db().commit()
        self._call_silently(
            task,
            tasks.GenerateCapacityLogTask)
        return task
Example #32
File: manager.py  Project: tsipa/fuel-web
    def execute(self):
        logger.debug("Creating release download task")
        task = Task(name="download_release")
        db().add(task)
        db().commit()
        self._call_silently(
            task,
            tasks.DownloadReleaseTask,
            self.release_data
        )
        return task
Example #33
    def execute(self, nodes_to_provision):
        """Run provisioning task on specified nodes
        """
        # locking nodes
        nodes_ids = [node.id for node in nodes_to_provision]
        nodes = objects.NodeCollection.filter_by_list(None,
                                                      'id',
                                                      nodes_ids,
                                                      order_by='id')
        objects.NodeCollection.lock_for_update(nodes).all()

        objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
        logger.debug('Nodes to provision: {0}'.format(' '.join(
            [n.fqdn for n in nodes_to_provision])))

        task_provision = Task(name=consts.TASK_NAMES.provision,
                              cluster=self.cluster)
        db().add(task_provision)
        db().commit()

        provision_message = self._call_silently(task_provision,
                                                tasks.ProvisionTask,
                                                nodes_to_provision,
                                                method_name='message')

        task_provision = objects.Task.get_by_uid(task_provision.id,
                                                 fail_if_not_found=True,
                                                 lock_for_update=True)
        task_provision.cache = provision_message
        objects.NodeCollection.lock_for_update(nodes).all()

        for node in nodes_to_provision:
            node.pending_addition = False
            node.status = consts.NODE_STATUSES.provisioning
            node.progress = 0

        db().commit()

        rpc.cast('naily', provision_message)

        return task_provision
Example #34
    def test_verify_networks_with_dhcp_subtask(self):
        """Test verifies that when dhcp subtask is ready and
        verify_networks errored - verify_networks will be in error
        """
        self.env.create(
            cluster_kwargs={},
            nodes_kwargs=[
                {"api": False},
                {"api": False}
            ]
        )
        cluster_db = self.env.clusters[0]
        node1, node2 = self.env.nodes
        nets_sent = [{'iface': 'eth0', 'vlans': range(100, 105)}]

        task = Task(
            name="verify_networks",
            cluster_id=cluster_db.id
        )
        task.cache = {
            "args": {
                'nodes': [{'uid': node1.id, 'networks': nets_sent},
                          {'uid': node2.id, 'networks': nets_sent}]
            }
        }
        self.db.add(task)
        self.db.commit()
        dhcp_subtask = Task(
            name='check_dhcp',
            cluster_id=cluster_db.id,
            parent_id=task.id,
            status='ready'
        )
        self.db.add(dhcp_subtask)
        self.db.commit()
        kwargs = {'task_uuid': task.uuid,
                  'status': 'ready',
                  'nodes': [{'uid': node1.id, 'networks': nets_sent},
                            {'uid': node2.id, 'networks': []}]}
        self.receiver.verify_networks_resp(**kwargs)
        self.assertEqual(task.status, "error")
Example #35
    def test_verify_networks_resp_incomplete_network_data_on_first_node(self):
        """Test verifies that when network data is incomplete on first node
        task would not fail and be erred as expected
        """
        self.env.create(
            cluster_kwargs={},
            nodes_kwargs=[
                {"api": False, 'name': 'node1'},
                {"api": False, 'name': 'node2'},
            ]
        )
        cluster_db = self.env.clusters[0]
        node1, node2 = self.env.nodes
        nets_sent = [{'iface': 'eth0', 'vlans': range(100, 105)}]

        task = Task(
            name="super",
            cluster_id=cluster_db.id
        )
        task.cache = {
            "args": {
                'nodes': [{'uid': node1.id, 'networks': nets_sent},
                          {'uid': node2.id, 'networks': nets_sent}]
            }
        }

        self.db.add(task)
        self.db.commit()

        kwargs = {'task_uuid': task.uuid,
                  'status': 'ready',
                  'nodes': [{'uid': node1.id, 'networks': []},
                            {'uid': node2.id, 'networks': nets_sent}]}
        self.receiver.verify_networks_resp(**kwargs)
        self.db.refresh(task)
        self.assertEqual(task.status, "error")
        self.assertEqual(task.message, '')
        error_nodes = [{'uid': node1.id, 'interface': 'eth0',
                        'name': node1.name, 'mac': node1.interfaces[0].mac,
                        'absent_vlans': nets_sent[0]['vlans']}]
        self.assertEqual(task.result, error_nodes)
Example #36
    def execute(self, filters, force=False, graph_type=None, **kwargs):
        self.check_running_task(consts.TASK_NAMES.deployment)

        task = Task(name=consts.TASK_NAMES.deployment,
                    cluster=self.cluster,
                    status=consts.TASK_STATUSES.pending)
        db().add(task)

        nodes_to_update = objects.Cluster.get_nodes_to_update_config(
            self.cluster, filters.get('node_ids'), filters.get('node_role'))

        message = self._call_silently(task,
                                      self.get_deployment_task(),
                                      nodes_to_update,
                                      graph_type=graph_type,
                                      method_name='message',
                                      force=force)

        # locking task
        task = objects.Task.get_by_uid(task.id,
                                       fail_if_not_found=True,
                                       lock_for_update=True)

        if task.is_completed():
            return task

        # locking nodes
        objects.NodeCollection.lock_nodes(nodes_to_update)

        task.cache = copy.copy(message)
        task.cache['nodes'] = [n.id for n in nodes_to_update]

        for node in nodes_to_update:
            node.status = consts.NODE_STATUSES.deploying
            node.progress = 0

        db().commit()

        rpc.cast('naily', message)

        return task
Example #37
    def test_do_not_set_cluster_to_error_if_validation_failed(self):
        for task_name in ['check_before_deployment', 'check_networks']:
            supertask = Task(
                name='deploy',
                cluster=self.cluster,
                status='error')

            check_task = Task(
                name=task_name,
                cluster=self.cluster,
                status='error')

            supertask.subtasks.append(check_task)
            self.db.add(check_task)
            self.db.commit()

            objects.Task._update_cluster_data(supertask)
            self.db.flush()

            self.assertEqual(self.cluster.status, 'new')
            self.assertFalse(self.cluster.is_locked)
Example #38
    def execute(self, data, check_admin_untagged=False):
        task = Task(name="check_networks", cluster=self.cluster)
        db().add(task)
        db().commit()
        self._call_silently(task, tasks.CheckNetworksTask, data,
                            check_admin_untagged)
        db().refresh(task)
        if task.status == 'running':
            TaskHelper.update_task_status(task.uuid,
                                          status="ready",
                                          progress=100)
        return task
Example #39
    def test_running_task_deletion(self):
        task = Task(name='deployment',
                    cluster=self.cluster_db,
                    status=consts.TASK_STATUSES.running,
                    progress=10)
        self.db.add(task)
        self.db.flush()
        resp = self.app.delete(
            reverse('TaskHandler', kwargs={'obj_id': task.id}) + "?force=0",
            headers=self.default_headers,
            expect_errors=True)
        self.assertEqual(resp.status_code, 400)
Example #40
    def create_deploy_tasks(self):
        cluster = self.env.create()

        deploy_task = Task(name=consts.TASK_NAMES.deploy,
                           cluster_id=cluster.id,
                           status=consts.TASK_STATUSES.pending)
        self.db.add(deploy_task)
        self.db.flush()
        provision_task = Task(name=consts.TASK_NAMES.provision,
                              status=consts.TASK_STATUSES.pending,
                              parent_id=deploy_task.id,
                              cluster_id=cluster.id)
        self.db.add(provision_task)
        deployment_task = Task(name=consts.TASK_NAMES.deployment,
                               status=consts.TASK_STATUSES.pending,
                               parent_id=deploy_task.id,
                               cluster_id=cluster.id)
        self.db.add(deployment_task)
        self.db.flush()

        return deploy_task, provision_task, deployment_task
Example #41
    def test_update_nodes_to_error_if_provision_task_failed(self):
        self.cluster.nodes[0].status = 'provisioning'
        self.cluster.nodes[0].progress = 12
        task = Task(name='provision', cluster=self.cluster, status='error')
        self.db.add(task)
        self.db.commit()

        TaskHelper.update_cluster_status(task.uuid)

        self.assertEquals(self.cluster.status, 'error')
        self.node_should_be_error_with_type(self.cluster.nodes[0], 'provision')
        self.nodes_should_not_be_error(self.cluster.nodes[1:])
Example #42
    def execute(self, **kwargs):
        logger.info("Starting update_dnsmasq task")
        self.check_running_task(consts.TASK_NAMES.update_dnsmasq)

        task = Task(name=consts.TASK_NAMES.update_dnsmasq)
        db().add(task)
        db().commit()
        self._call_silently(
            task,
            tasks.UpdateDnsmasqTask
        )
        return task
Example #43
    def test_mongo_node_without_ext_mongo(self):
        cluster = self.env.create(
            release_kwargs={
                'attributes_metadata': self.get_custom_meta(True, False)
            },
            nodes_kwargs=[{
                'pending_roles': ['mongo'],
                'status': 'discover',
                'pending_addition': True
            }])
        task = Task(name=TASK_NAMES.deploy, cluster=cluster)
        CheckBeforeDeploymentTask._check_mongo_nodes(task)
Example #44
    def execute(self):
        logger.info("Trying to start dump_environment task")
        self.check_running_task('dump')

        task = Task(name="dump")
        db().add(task)
        db().commit()
        self._call_silently(
            task,
            tasks.DumpTask,
        )
        return task
Example #45
    def test_verify_networks_resp_error(self):
        self.env.create(
            cluster_kwargs={},
            nodes_kwargs=[
                {"api": False},
                {"api": False}
            ]
        )
        cluster_db = self.env.clusters[0]
        node1, node2 = self.env.nodes
        nets_sent = [{'iface': 'eth0', 'vlans': range(100, 105)}]
        nets_resp = [{'iface': 'eth0', 'vlans': range(100, 104)}]

        task = Task(
            name="super",
            cluster_id=cluster_db.id
        )
        task.cache = {
            "args": {
                'nodes': self.nodes_message((node1, node2), nets_sent),
                'offline': 0,
            }
        }
        self.db.add(task)
        self.db.commit()

        kwargs = {'task_uuid': task.uuid,
                  'status': 'ready',
                  'nodes': self.nodes_message((node1, node2), nets_resp)}
        self.receiver.verify_networks_resp(**kwargs)
        self.db.flush()
        self.db.refresh(task)
        self.assertEqual(task.status, "error")
        error_nodes = []
        for node in self.env.nodes:
            error_nodes.append({'uid': node.id, 'interface': 'eth0',
                                'name': node.name, 'absent_vlans': [104],
                                'mac': node.interfaces[0].mac})
        self.assertEqual(task.message, '')
        self.assertEqual(task.result, error_nodes)
Example #46
    def test_proper_progress_calculation(self):
        supertask = Task(
            uuid=str(uuid.uuid4()),
            name="super",
            status="running"
        )

        self.db.add(supertask)
        self.db.commit()

        subtask_weight = 0.4
        task_deletion = supertask.create_subtask("node_deletion",
                                                 weight=subtask_weight)
        task_provision = supertask.create_subtask("provision",
                                                  weight=subtask_weight)

        subtask_progress = random.randint(1, 20)

        deletion_kwargs = {'task_uuid': task_deletion.uuid,
                           'progress': subtask_progress,
                           'status': 'running'}
        provision_kwargs = {'task_uuid': task_provision.uuid,
                            'progress': subtask_progress,
                            'status': 'running'}

        self.receiver.provision_resp(**provision_kwargs)
        self.db.commit()
        self.receiver.remove_nodes_resp(**deletion_kwargs)
        self.db.commit()

        self.db.refresh(task_deletion)
        self.db.refresh(task_provision)
        self.db.refresh(supertask)

        calculated_progress = helpers.\
            TaskHelper.calculate_parent_task_progress(
                [task_deletion, task_provision]
            )

        self.assertEqual(supertask.progress, calculated_progress)
Example #47
    def execute(self, nodes, mclient_remove=True):
        cluster_id = None
        if hasattr(self, 'cluster'):
            cluster_id = self.cluster.id
            objects.TaskCollection.lock_cluster_tasks(cluster_id)

        logger.info("Trying to execute node deletion task with nodes %s",
                    ', '.join(str(node.id) for node in nodes))

        self.verify_nodes_with_cluster(nodes)
        objects.NodeCollection.lock_nodes(nodes)

        if cluster_id is None:
            # DeletionTask operates on cluster's nodes.
            # Nodes that are not in cluster are simply deleted.

            Node.delete_by_ids([n.id for n in nodes])
            db().flush()

            task = Task(name=consts.TASK_NAMES.node_deletion,
                        progress=100,
                        status=consts.TASK_STATUSES.ready)
            db().add(task)
            db().flush()

            return task

        task = Task(name=consts.TASK_NAMES.node_deletion,
                    cluster=self.cluster)
        db().add(task)
        for node in nodes:
            objects.Node.update(node,
                                {'status': consts.NODE_STATUSES.removing})
        db().flush()

        self._call_silently(task,
                            tasks.DeletionTask,
                            nodes=tasks.DeletionTask.prepare_nodes_for_task(
                                nodes, mclient_remove=mclient_remove))

        return task
Example #48
    def execute(self, nodes_to_deployment, deployment_tasks=None):
        deployment_tasks = deployment_tasks or []

        # locking nodes for update
        objects.NodeCollection.lock_nodes(nodes_to_deployment)
        objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deployment)

        logger.debug('Nodes to deploy: {0}'.format(' '.join(
            [n.fqdn for n in nodes_to_deployment])))
        task_deployment = Task(name=consts.TASK_NAMES.deployment,
                               cluster=self.cluster)
        db().add(task_deployment)

        deployment_message = self._call_silently(
            task_deployment,
            tasks.DeploymentTask,
            nodes_to_deployment,
            deployment_tasks=deployment_tasks,
            method_name='message')

        db().refresh(task_deployment)

        # locking task
        task_deployment = objects.Task.get_by_uid(task_deployment.id,
                                                  fail_if_not_found=True,
                                                  lock_for_update=True)
        # locking nodes
        objects.NodeCollection.lock_nodes(nodes_to_deployment)

        task_deployment.cache = deployment_message

        for node in nodes_to_deployment:
            node.status = 'deploying'
            node.progress = 0

        db().commit()

        rpc.cast('naily', deployment_message)

        return task_deployment
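
This manager and the provisioning manager in Example #53 below follow the same ordering: build the RPC message via `_call_silently(..., method_name='message')`, store it in `task.cache`, commit, and only then cast it to the orchestrator, so the API always exposes exactly what was sent. A hedged sketch of that ordering; the `_Fake*` classes are stand-ins for this illustration, not Nailgun's real session or RPC client:

class _FakeTask(object):
    cache = None

class _FakeSession(object):
    def commit(self):
        print('committed')

class _FakeRpc(object):
    def cast(self, topic, message):
        print('cast to %s: %s' % (topic, message['method']))

def persist_then_cast(task, message, session, rpc_client):
    # Cache the message on the task first, commit, and only then hand
    # the message over to the orchestrator queue.
    task.cache = message
    session.commit()
    rpc_client.cast('naily', message)

persist_then_cast(_FakeTask(), {'method': 'deploy', 'args': {}},
                  _FakeSession(), _FakeRpc())
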
Example #49
    def execute(self):
        # locking tasks for processing
        names = (TASK_NAMES.stop_deployment, TASK_NAMES.deployment,
                 TASK_NAMES.provision)
        objects.TaskCollection.lock_cluster_tasks(self.cluster.id, names=names)

        stop_running = objects.TaskCollection.filter_by(
            None,
            cluster_id=self.cluster.id,
            name=TASK_NAMES.stop_deployment,
        )
        stop_running = objects.TaskCollection.order_by(stop_running,
                                                       'id').first()

        if stop_running:
            if stop_running.status == TASK_STATUSES.running:
                raise errors.StopAlreadyRunning("Stopping deployment task "
                                                "is already launched")
            else:
                db().delete(stop_running)
                db().flush()

        deployment_task = objects.TaskCollection.filter_by(
            None,
            cluster_id=self.cluster.id,
            name=TASK_NAMES.deployment,
        )
        deployment_task = objects.TaskCollection.order_by(
            deployment_task, 'id').first()

        provisioning_task = objects.TaskCollection.filter_by(
            None,
            cluster_id=self.cluster.id,
            name=TASK_NAMES.provision,
        )
        provisioning_task = objects.TaskCollection.order_by(
            provisioning_task, 'id').first()

        if not deployment_task and not provisioning_task:
            db().rollback()
            raise errors.DeploymentNotRunning(
                u"Nothing to stop - deployment is "
                u"not running on environment '{0}'".format(self.cluster.id))

        task = Task(name="stop_deployment", cluster=self.cluster)
        db().add(task)
        db().commit()
        self._call_silently(task,
                            tasks.StopDeploymentTask,
                            deploy_task=deployment_task,
                            provision_task=provisioning_task)
        return task
Example #50
    def test_remove_cluster_resp_failed(self):
        self.env.create(
            cluster_kwargs={},
            nodes_kwargs=[
                {"api": False},
                {"api": False}
            ]
        )
        cluster_db = self.env.clusters[0]
        node1, node2 = self.env.nodes
        self.env.create_notification(
            cluster_id=cluster_db.id
        )

        task = Task(
            uuid=str(uuid.uuid4()),
            name="cluster_deletion",
            cluster_id=cluster_db.id
        )
        self.db.add(task)
        self.db.commit()

        kwargs = {'task_uuid': task.uuid,
                  'progress': 100,
                  'status': 'error',
                  'nodes': [{'uid': node1.id}],
                  'error_nodes': [{'uid': node1.id,
                                   'error': "RPC method failed"}],
                  }

        self.receiver.remove_cluster_resp(**kwargs)
        self.db.refresh(task)
        self.assertEqual(task.status, "error")

        nodes_db = self.db.query(Node)\
            .filter_by(cluster_id=cluster_db.id).all()
        self.assertNotEqual(len(nodes_db), 0)

        attrs_db = self.db.query(Attributes)\
            .filter_by(cluster_id=cluster_db.id).all()
        self.assertNotEqual(len(attrs_db), 0)

        nots_db = self.db.query(Notification)\
            .filter_by(cluster_id=cluster_db.id).all()
        self.assertNotEqual(len(nots_db), 0)

        nets_db = self.db.query(NetworkGroup).\
            filter(NetworkGroup.group_id ==
                   objects.Cluster.get_default_group(
                       self.env.clusters[0]).id).\
            all()
        self.assertNotEqual(len(nets_db), 0)
Example #51
File: manager.py Project: tsipa/fuel-web
    def execute(self):
        stop_running = db().query(Task).filter_by(
            cluster=self.cluster,
            name='stop_deployment'
        ).first()
        if stop_running:
            if stop_running.status == 'running':
                raise errors.StopAlreadyRunning(
                    "Stopping deployment task "
                    "is already launched"
                )
            else:
                db().delete(stop_running)
                db().commit()

        deploy_running = db().query(Task).filter_by(
            cluster=self.cluster,
            name='deployment',
            status='running'
        ).first()
        if not deploy_running:
            provisioning_running = db().query(Task).filter_by(
                cluster=self.cluster,
                name='provision',
                status='running'
            ).first()
            if provisioning_running:
                raise errors.DeploymentNotRunning(
                    u"Provisioning interruption for environment "
                    u"'{0}' is not implemented right now".format(
                        self.cluster.id
                    )
                )
            raise errors.DeploymentNotRunning(
                u"Nothing to stop - deployment is "
                u"not running on environment '{0}'".format(
                    self.cluster.id
                )
            )

        task = Task(
            name="stop_deployment",
            cluster=self.cluster
        )
        db().add(task)
        db().commit()
        self._call_silently(
            task,
            tasks.StopDeploymentTask,
            deploy_task=deploy_running
        )
        return task
Example #52
    def test_verify_networks_error_and_notice_are_concatenated(self):
        self.env.create(
            cluster_kwargs={},
            nodes_kwargs=[
                {"api": False},
                {"api": False},
            ]
        )
        cluster_db = self.env.clusters[0]
        node1, node2 = self.env.nodes
        nets = [{'iface': 'eth0', 'vlans': range(100, 105)}]

        task = Task(
            name="verify_networks",
            cluster_id=cluster_db.id
        )
        task.cache = {
            "args": {
                "nodes": self.nodes_message((node1, node2), nets),
                "offline": 2,
            }
        }
        self.db.add(task)
        self.db.flush()

        custom_error = 'CustomError'
        kwargs = {'task_uuid': task.uuid,
                  'status': 'error',
                  'nodes': self.nodes_message((node1, node2), nets),
                  'error': custom_error}

        self.receiver.verify_networks_resp(**kwargs)
        self.db.flush()
        self.db.refresh(task)
        self.assertEqual(task.status, "error")
        offline_notice = 'Notice: 2 node(s) were offline during connectivity' \
                         ' check so they were skipped from the check.'
        self.assertEqual(task.message,
                         '\n'.join((custom_error, offline_notice)))
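
The expected message is simply the custom error and the offline notice joined with a newline. A standalone sketch of that concatenation, assuming the notice wording used in the test; the `build_message` helper is illustrative, not the receiver's actual code:

def build_message(error, offline_count):
    """Join a custom error with an offline-nodes notice, if any."""
    parts = [error] if error else []
    if offline_count:
        parts.append('Notice: %d node(s) were offline during connectivity'
                     ' check so they were skipped from the check.'
                     % offline_count)
    return '\n'.join(parts)

assert build_message('CustomError', 2).startswith('CustomError\nNotice: 2 node(s)')
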
Example #53
File: manager.py Project: tsipa/fuel-web
    def execute(self, nodes_to_provision):
        """Run provisioning task on specified nodes

        Constraints: currently this task cannot deploy RedHat.
                     For redhat here should be added additional
                     tasks e.i. check credentials, check licenses,
                     redhat downloading.
                     Status of this task you can track here:
                     https://blueprints.launchpad.net/fuel/+spec
                           /nailgun-separate-provisioning-for-redhat
        """
        TaskHelper.update_slave_nodes_fqdn(nodes_to_provision)
        logger.debug('Nodes to provision: {0}'.format(
            ' '.join([n.fqdn for n in nodes_to_provision])))

        task_provision = Task(name='provision', cluster=self.cluster)
        db().add(task_provision)
        db().commit()

        provision_message = self._call_silently(
            task_provision,
            tasks.ProvisionTask,
            nodes_to_provision,
            method_name='message'
        )
        db().refresh(task_provision)

        task_provision.cache = provision_message

        for node in nodes_to_provision:
            node.pending_addition = False
            node.status = 'provisioning'
            node.progress = 0

        db().commit()

        rpc.cast('naily', provision_message)

        return task_provision
Example #54
    def setUp(self):
        super(TestCheckBeforeDeploymentTask, self).setUp()
        self.env.create(cluster_kwargs={},
                        nodes_kwargs=[{
                            'roles': ['controller']
                        }])

        self.env.create_node()
        self.node = self.env.nodes[0]
        self.cluster = self.env.clusters[0]
        self.task = Task(cluster_id=self.env.clusters[0].id)
        self.env.db.add(self.task)
        self.env.db.commit()
Example #55
    def test_update_nodes_to_error_if_deployment_task_failed(self):
        self.cluster.nodes[0].status = 'deploying'
        self.cluster.nodes[0].progress = 12
        task = Task(name='deployment', cluster=self.cluster, status='error')
        self.db.add(task)
        self.db.flush()

        objects.Task._update_cluster_data(task)
        self.db.flush()

        self.assertEqual(self.cluster.status, 'error')
        self._node_should_be_error_with_type(self.cluster.nodes[0], 'deploy')
        self._nodes_should_not_be_error(self.cluster.nodes[1:])
Example #56
    def test_task_contains_field_parent(self):
        parent_task = Task(name=consts.TASK_NAMES.deployment,
                           cluster=self.cluster_db,
                           status=consts.TASK_STATUSES.running,
                           progress=10)
        child_task = parent_task.create_subtask(
            name=consts.TASK_NAMES.deployment,
            status=consts.TASK_STATUSES.running,
            progress=10)

        cluster_tasks = self.app.get(
            reverse('TaskCollectionHandler',
                    kwargs={'cluster_id': self.cluster_db.id}),
            headers=self.default_headers).json_body

        child_task_data = next(t for t in cluster_tasks
                               if t['id'] == child_task.id)

        self.assertEqual(parent_task.id, child_task_data['parent_id'])
        parent_task_data = next(t for t in cluster_tasks
                                if t['id'] == parent_task.id)
        self.assertIsNone(parent_task_data['parent_id'])
Example #57
    def execute(self, conf=None, **kwargs):
        logger.info("Trying to start dump_environment task")
        self.check_running_task(consts.TASK_NAMES.dump)

        task = Task(name=consts.TASK_NAMES.dump)
        db().add(task)
        db().flush()
        self._call_silently(
            task,
            tasks.DumpTask,
            conf=conf
        )
        return task
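
`check_running_task` is expected to refuse to start a second dump while one is already running. A hedged sketch of that guard using a plain list in place of the real task collection; the exception name `TaskAlreadyRunning` is assumed for this illustration and is not taken from Nailgun's errors module:

class TaskAlreadyRunning(Exception):
    """Hypothetical error type for this illustration."""

def check_running_task(existing_tasks, name):
    # Refuse to start a task if one with the same name is still running.
    for task in existing_tasks:
        if task['name'] == name and task['status'] == 'running':
            raise TaskAlreadyRunning(name)

check_running_task([{'name': 'dump', 'status': 'ready'}], 'dump')  # passes
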
Example #58
    def _raise_error_task(self, cluster, task_name, exc):
        # set task status to error and update its corresponding data
        task = Task(name=task_name,
                    cluster=cluster,
                    status=consts.TASK_STATUSES.error,
                    progress=100,
                    message=six.text_type(exc))
        db().add(task)
        db().commit()

        logger.exception('Error in network configuration')

        self.raise_task(task)
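
A helper like this would typically be called from an exception handler in the network-configuration flow. The caller below is hypothetical and uses a printing stub in place of the real helper, purely to show where such a call would sit:

def raise_error_task_stub(cluster, task_name, exc):
    # Stand-in for the real helper: just report what would have been stored.
    print('%s on %s marked as error: %s' % (task_name, cluster, exc))

def apply_network_config(cluster):
    # Hypothetical caller: validate new settings and, on failure, turn the
    # exception into an error task instead of letting it propagate raw.
    try:
        raise ValueError('VLAN 104 overlaps with the management network')
    except ValueError as exc:
        raise_error_task_stub(cluster, 'check_networks', exc)

apply_network_config('cluster-1')
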
Example #59
    def test_mongo_node_with_ext_mongo(self):
        self.env.create(release_kwargs={
            'attributes_metadata': self.get_custom_meta(True, True)
        },
                        nodes_kwargs=[{
                            'pending_roles': ['mongo'],
                            'status': 'discover',
                            'pending_addition': True
                        }])
        cluster = self.env.clusters[0]
        task = Task(name=TASK_NAMES.deploy, cluster=cluster)
        self.assertRaises(errors.ExtMongoCheckerError,
                          CheckBeforeDeploymentTask._check_mongo_nodes, task)
Example #60
    def test_verify_networks_resp_empty_nodes_custom_error(self):
        self.env.create(cluster_kwargs={},
                        nodes_kwargs=[{
                            "api": False
                        }, {
                            "api": False
                        }])
        cluster_db = self.env.clusters[0]
        node1, node2 = self.env.nodes
        nets_sent = [{'iface': 'eth0', 'vlans': range(100, 105)}]

        task = Task(name="super", cluster_id=cluster_db.id)
        task.cache = {
            "args": {
                'nodes': [{
                    'uid': node1.id,
                    'networks': nets_sent
                }, {
                    'uid': node2.id,
                    'networks': nets_sent
                }]
            }
        }
        self.db.add(task)
        self.db.commit()

        error_msg = 'Custom error message.'
        kwargs = {
            'task_uuid': task.uuid,
            'status': 'ready',
            'nodes': [],
            'error': error_msg
        }
        self.receiver.verify_networks_resp(**kwargs)
        self.db.flush()
        self.db.refresh(task)
        self.assertEqual(task.status, "error")
        self.assertEqual(task.message, error_msg)