def test_verify_networks_with_dhcp_subtask_erred(self):
    """An errored check_dhcp subtask propagates its error message to
    the parent verify_networks task even when the RPC response reports
    'ready', and the missing-vlan result for node2 is preserved.
    """
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[{"api": False}, {"api": False}])
    cluster_db = self.env.clusters[0]
    node1, node2 = self.env.nodes

    sent_networks = [{'iface': 'eth0', 'vlans': range(100, 105)}]
    task = Task(name="verify_networks", cluster_id=cluster_db.id)
    task.cache = {
        "args": {
            'nodes': [
                {'uid': node1.id, 'networks': sent_networks},
                {'uid': node2.id, 'networks': sent_networks},
            ]
        }
    }
    self.db.add(task)
    self.db.commit()

    # Child task that failed DHCP verification.
    self.db.add(Task(name='check_dhcp',
                     cluster_id=cluster_db.id,
                     parent_id=task.id,
                     status='error',
                     message='DHCP ERROR'))
    self.db.commit()

    # node2 reports no networks back, node1 reports everything.
    self.receiver.verify_networks_resp(
        task_uuid=task.uuid,
        status='ready',
        nodes=[
            {'uid': node1.id, 'networks': sent_networks},
            {'uid': node2.id, 'networks': []},
        ])

    self.assertEqual(task.status, "error")
    self.assertEqual(task.message, u'DHCP ERROR')
    self.assertEqual(
        task.result,
        [{u'absent_vlans': [100, 101, 102, 103, 104],
          u'interface': 'eth0',
          u'mac': node2.interfaces[0].mac,
          u'name': 'Untitled ({0})'.format(node2.mac[-5:].lower()),
          u'uid': node2.id}])
def test_verify_networks_with_dhcp_subtask(self):
    """Test verifies that when dhcp subtask is ready and
    verify_networks errored - verify_networks will be in error
    """
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[{"api": False}, {"api": False}])
    cluster_db = self.env.clusters[0]
    node1, node2 = self.env.nodes

    sent_networks = [{'iface': 'eth0', 'vlans': range(100, 105)}]
    task = Task(name="verify_networks", cluster_id=cluster_db.id)
    task.cache = {
        "args": {
            'nodes': [
                {'uid': node1.id, 'networks': sent_networks},
                {'uid': node2.id, 'networks': sent_networks},
            ]
        }
    }
    self.db.add(task)
    self.db.commit()

    # Sibling subtask that completed successfully.
    self.db.add(Task(name='check_dhcp',
                     cluster_id=cluster_db.id,
                     parent_id=task.id,
                     status='ready'))
    self.db.commit()

    # node2 returns no networks, so verification itself must fail.
    self.receiver.verify_networks_resp(
        task_uuid=task.uuid,
        status='ready',
        nodes=[
            {'uid': node1.id, 'networks': sent_networks},
            {'uid': node2.id, 'networks': []},
        ])

    self.assertEqual(task.status, "error")
def execute(self):
    """Reset the environment.

    Refuses to run while a deployment is in progress, deletes obsolete
    deployment-related tasks, then creates a reset_environment task and
    launches it silently.

    :returns: the newly created reset_environment Task
    :raises: errors.DeploymentAlreadyStarted when a deploy task is
        currently running for this cluster
    """
    deploy_running = db().query(Task).filter_by(
        cluster=self.cluster,
        name=consts.TASK_NAMES.deploy,
        status='running'
    ).first()
    if deploy_running:
        raise errors.DeploymentAlreadyStarted(
            u"Can't reset environment '{0}' when "
            u"deployment is running".format(self.cluster.id))

    obsolete_tasks = db().query(Task).filter_by(
        cluster_id=self.cluster.id,
    ).filter(
        Task.name.in_([
            consts.TASK_NAMES.deploy,
            consts.TASK_NAMES.deployment,
            consts.TASK_NAMES.stop_deployment
        ]))
    for task in obsolete_tasks:
        db().delete(task)
    db().commit()

    task = Task(name=consts.TASK_NAMES.reset_environment,
                cluster=self.cluster)
    db().add(task)
    # BUG FIX: was `db.commit()` -- `db` is the session factory and must
    # be called (`db()`) to obtain the session, as done everywhere else
    # in this method; `db.commit` would raise AttributeError.
    db().commit()
    self._call_silently(task, tasks.ResetEnvironmentTask)
    return task
def launch_verify(self, cluster):
    """Validate posted network data and start network verification.

    If validation fails, a 'check_networks' task is created, marked
    errored with the validator's message and returned to the client
    via an HTTP 202 response.

    :param cluster: cluster whose networks are being verified
    :returns: rendered verification task
    """
    try:
        data = self.validator.validate_networks_update(web.data())
    except web.webapi.badrequest as exc:
        task = Task(name='check_networks', cluster=cluster)
        db().add(task)
        db().commit()
        TaskHelper.set_error(task.uuid, exc.data)
        logger.error(traceback.format_exc())
        json_task = build_json_response(TaskHandler.render(task))
        raise web.accepted(data=json_task)

    # The admin network is excluded from user-driven verification.
    data["networks"] = [n for n in data["networks"]
                        if n.get("name") != "fuelweb_admin"]

    vlan_ids = []
    for network in data['networks']:
        vlan_ids.append({
            'name': network['name'],
            'vlans': cluster.network_manager.generate_vlan_ids_list(
                data, cluster, network)
        })

    task_manager = VerifyNetworksTaskManager(cluster_id=cluster.id)
    try:
        task = task_manager.execute(data, vlan_ids)
    except errors.CantRemoveOldVerificationTask:
        raise web.badrequest("You cannot delete running task manually")
    return TaskHandler.render(task)
def test_notification_delete_cluster_done(self):
    """A completed cluster deletion yields exactly one unread 'done'
    notification naming the removed environment.
    """
    cluster = self.env.create_cluster(api=False)
    cluster_name = cluster.name

    receiver = rcvr.NailgunReceiver()
    task = Task(uuid=str(uuid.uuid4()),
                name="cluster_deletion",
                cluster_id=cluster.id)
    self.db.add(task)
    self.db.commit()

    receiver.remove_cluster_resp(task_uuid=task.uuid, status='ready')

    notifications = self.db.query(Notification).all()
    self.assertEqual(len(notifications), 1)
    notification = notifications[0]
    self.assertEqual(notification.status, "unread")
    self.assertEqual(notification.topic, "done")
    self.assertEqual(
        notification.message,
        "Environment '%s' and all its nodes "
        "are deleted" % cluster_name)
def execute(self):
    """Create a 'download_release' task and launch it silently.

    :returns: the newly created download_release Task
    """
    # Typo fix in diagnostic message: "dowload" -> "download".
    logger.debug("Creating release download task")
    task = Task(name="download_release")
    db().add(task)
    db().commit()
    self._call_silently(task,
                        tasks.DownloadReleaseTask,
                        self.release_data)
    return task
def execute(self, data, check_admin_untagged=False):
    """Run network checks for the cluster.

    Any previous 'check_networks' task is deleted first (fetched under
    a row lock), a fresh task is created and executed, and if it is
    still 'running' afterwards it is marked ready at 100% progress.

    :param data: network configuration to check
    :param check_admin_untagged: also verify untagged admin networks
    :returns: the finished 'check_networks' Task
    """
    previous = objects.TaskCollection.filter_by(
        None,
        cluster_id=self.cluster.id,
        name=TASK_NAMES.check_networks)
    previous = objects.TaskCollection.order_by(previous, 'id')
    stale_task = objects.TaskCollection.lock_for_update(previous).first()
    if stale_task:
        db().delete(stale_task)
        db().flush()

    task = Task(name=TASK_NAMES.check_networks, cluster=self.cluster)
    db().add(task)
    db().commit()
    self._call_silently(task,
                        tasks.CheckNetworksTask,
                        data,
                        check_admin_untagged)

    # Re-read the task under a row lock; _call_silently may have
    # updated its status on error.
    task = objects.Task.get_by_uid(task.id,
                                   fail_if_not_found=True,
                                   lock_for_update=True)
    if task.status == TASK_STATUSES.running:
        # update task status with given data
        objects.Task.update(
            task, {'status': TASK_STATUSES.ready, 'progress': 100})
    db().commit()
    return task
def execute(self, nodes_to_deployment):
    """Build a deployment RPC message for the nodes and dispatch it.

    :param nodes_to_deployment: nodes to deploy
    :returns: the 'deployment' Task tracking the operation
    """
    # Lock the node rows before mutating them.
    objects.NodeCollection.lock_nodes(nodes_to_deployment)
    objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deployment)
    logger.debug('Nodes to deploy: {0}'.format(
        ' '.join([n.fqdn for n in nodes_to_deployment])))

    deploy_task = Task(name='deployment', cluster=self.cluster)
    db().add(deploy_task)

    message = self._call_silently(deploy_task,
                                  tasks.DeploymentTask,
                                  nodes_to_deployment,
                                  method_name='message')
    db().refresh(deploy_task)

    # Re-acquire the task row under a lock before caching the message.
    deploy_task = objects.Task.get_by_uid(deploy_task.id,
                                          fail_if_not_found=True,
                                          lock_for_update=True)
    # Lock the nodes again for the status update below.
    objects.NodeCollection.lock_nodes(nodes_to_deployment)

    deploy_task.cache = message
    for node in nodes_to_deployment:
        node.status = 'deploying'
        node.progress = 0
    db().commit()

    rpc.cast('naily', message)
    return deploy_task
def test_forced_deletion_of_running_transaction_(self):
    """DELETE with ?force=1 removes a running transaction; a follow-up
    GET on the same handler then returns 404.
    """
    task = Task(name='deployment',
                cluster=self.cluster_db,
                status=consts.TASK_STATUSES.running,
                progress=10)
    self.db.add(task)
    self.db.flush()

    url = reverse('TransactionHandler', kwargs={'obj_id': task.id})

    resp = self.app.delete(url + "?force=1",
                           headers=self.default_headers)
    self.assertEqual(resp.status_code, 204)

    resp = self.app.get(url,
                        headers=self.default_headers,
                        expect_errors=True)
    self.assertEqual(resp.status_code, 404)
def test_hard_deletion_behavior(self):
    """Forced deletion of a running transaction removes the row itself:
    GET returns 404 and the Task is gone from the database.
    """
    task = Task(name=consts.TASK_NAMES.deployment,
                cluster=self.cluster_db,
                status=consts.TASK_STATUSES.running,
                progress=10)
    self.db.add(task)
    self.db.flush()

    url = reverse('TransactionHandler', kwargs={'obj_id': task.id})

    resp = self.app.delete(url + "?force=1",
                           headers=self.default_headers)
    self.assertEqual(resp.status_code, 204)

    resp = self.app.get(url,
                        headers=self.default_headers,
                        expect_errors=True)
    self.assertEqual(resp.status_code, 404)

    # Hard deletion: no row left behind, not just a status flip.
    self.assertIsNone(self.db().query(Task).get(task.id))
def test_recalculate_deployment_task_progress(self):
    """Recalculated deployment progress over four nodes in mixed
    states comes out at 25 percent.
    """
    node_kwargs = [
        {'roles': ['controller'],
         'status': 'provisioned',
         'progress': 100},
        {'roles': ['compute'],
         'status': 'deploying',
         'progress': 100},
        {'roles': ['compute'],
         'status': 'ready',
         'progress': 0},
        {'roles': ['compute'],
         'status': 'discover',
         'progress': 0},
    ]
    cluster = self.create_env(node_kwargs)

    task = Task(name='deploy', cluster_id=cluster.id)
    self.db.add(task)
    self.db.commit()

    progress = TaskHelper.recalculate_deployment_task_progress(task)
    self.assertEquals(progress, 25)
def test_prepare_action_log_kwargs_with_web_ctx(self):
    """prepare_action_log_kwargs takes actor_id from the web context
    env and falls back to None when the key is absent.
    """
    self.env.create(nodes_kwargs=[
        {'roles': ['compute'], 'provisioning': True},
    ])
    cluster = self.env.clusters[0]

    task = Task(name='provision', cluster_id=cluster.id)
    self.db.add(task)
    self.db.flush()

    actor_id = 'xx'
    ctx_with_actor = {'env': {'fuel.action.actor_id': actor_id}}
    with mock.patch.dict(web.ctx, ctx_with_actor):
        kwargs = TaskHelper.prepare_action_log_kwargs(task)
        self.assertIn('actor_id', kwargs)
        self.assertEqual(actor_id, kwargs['actor_id'])

    # Without the key in env the actor_id must default to None.
    with mock.patch.dict(web.ctx, {'env': {}}):
        kwargs = TaskHelper.prepare_action_log_kwargs(task)
        self.assertIn('actor_id', kwargs)
        self.assertIsNone(kwargs['actor_id'])
def test_forced_task_deletion(self):
    """Deleting a running task requires ?force=1: force=0 is rejected
    with 400 while force=1 succeeds with 204.
    """
    self.env.create(nodes_kwargs=[{"roles": ["controller"]}])

    task = Task(name='deployment',
                cluster=self.env.clusters[0],
                status='running',
                progress=10)
    self.db.add(task)
    self.db.commit()

    url = reverse('TaskHandler', kwargs={'obj_id': task.id})

    resp = self.app.delete(url + "?force=0",
                           headers=self.default_headers,
                           expect_errors=True)
    self.assertEqual(resp.status_code, 400)

    resp = self.app.delete(url + "?force=1",
                           headers=self.default_headers)
    self.assertEqual(resp.status_code, 204)
def test_do_not_set_cluster_to_error_if_validation_failed(self):
    """Errored validation subtasks must not switch the cluster to
    'error': it stays 'new' for both validation task names.
    """
    for task_name in ('check_before_deployment', 'check_networks'):
        supertask = Task(name='deploy',
                         cluster=self.cluster,
                         status='error')
        check_task = Task(name=task_name,
                          cluster=self.cluster,
                          status='error')
        supertask.subtasks.append(check_task)
        self.db.add(check_task)
        self.db.commit()

        TaskHelper.update_cluster_status(supertask.uuid)
        self.assertEquals(self.cluster.status, 'new')
def setUp(self):
    """Prepare a neutron/gre cluster with one controller plus a spare
    node, a bare task, and a patched repo-URL list for the repository
    connectivity checks.
    """
    super(CheckRepositoryConnectionFromMasterNodeTaskTest, self).setUp()
    self.env.create(
        cluster_kwargs={
            'net_provider': 'neutron',
            'net_segment_type': 'gre'
        },
        nodes_kwargs=[{'roles': ['controller']}])
    self.env.create_node()

    self.task = Task(cluster_id=self.env.clusters[0].id)
    self.env.db.add(self.task)
    self.env.db.flush()

    self.url = 'url1'
    self.mocked_repositories = [{'type': 'deb',
                                 'uri': self.url,
                                 'suite': 'suite'}]
    # Every test in this class sees the same fake repository list.
    self.patcher = mock.patch(
        'nailgun.task.task.objects.Cluster.get_repo_urls',
        new=mock.Mock(return_value=self.mocked_repositories))
    self.mrepos = self.patcher.start()
def execute(self):
    """Start a deployment for the cluster.

    Collects the nodes slated for deletion, deployment and
    provisioning; if none exist, rolls back and raises
    WrongNodeStatus.  Otherwise commits the new 'deploy' supertask,
    records an action log entry and hands the rest to an async worker.

    :returns: the created 'deploy' supertask
    :raises: errors.WrongNodeStatus when there is nothing to do
    """
    logger.info(u"Trying to start deployment at cluster '{0}'".format(
        self.cluster.name or self.cluster.id))
    network_info = self.serialize_network_cfg(self.cluster)
    logger.info(u"Network info:\n{0}".format(
        jsonutils.dumps(network_info, indent=4)))

    self._remove_obsolete_tasks()

    supertask = Task(name=consts.TASK_NAMES.deploy,
                     cluster=self.cluster)
    db().add(supertask)

    nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
    nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
    nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

    if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
        db().rollback()
        raise errors.WrongNodeStatus("No changes to deploy")

    # we should have task committed for processing in other threads
    db().commit()
    TaskHelper.create_action_log(supertask)

    mule.call_task_manager_async(self.__class__,
                                 '_execute_async',
                                 self.cluster.id,
                                 supertask.id)
    return supertask