def _execute_and_cancel_execution(self, workflow_id, force=False,
                                  wait_for_termination=True,
                                  is_wait_for_asleep_node=True):
    dsl_path = resource('dsl/sleep_workflows.yaml')
    _id = uuid.uuid1()
    blueprint_id = 'blueprint_{0}'.format(_id)
    deployment_id = 'deployment_{0}'.format(_id)
    self.client.blueprints.upload(dsl_path, blueprint_id)
    self.client.deployments.create(blueprint_id, deployment_id)
    do_retries(verify_deployment_environment_creation_complete, 30,
               deployment_id=deployment_id)
    execution = self.client.executions.start(deployment_id, workflow_id)

    node_inst_id = self.client.node_instances.list(deployment_id)[0].id

    if is_wait_for_asleep_node:
        for retry in range(30):
            if self.client.node_instances.get(
                    node_inst_id).state == 'asleep':
                break
            time.sleep(1)
        else:
            raise RuntimeError("Execution was expected to go"
                               " into 'sleeping' status")

    execution = self.client.executions.cancel(execution.id, force)
    expected_status = Execution.FORCE_CANCELLING if force else \
        Execution.CANCELLING
    self.assertEquals(expected_status, execution.status)
    if wait_for_termination:
        wait_for_execution_to_end(execution)
        execution = self.client.executions.get(execution.id)
    return execution, deployment_id
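# Illustrative only: a minimal sketch of how a test might use the helper
# above. The workflow id 'sleep' is a hypothetical name assumed to exist in
# dsl/sleep_workflows.yaml; the real tests may use different workflow ids
# and additional assertions after cancellation.
def test_cancel_execution_sketch(self):
    # start a long-running workflow and request a (non-forced) cancel
    execution, deployment_id = self._execute_and_cancel_execution('sleep')
    # after wait_for_execution_to_end the execution is re-fetched, so its
    # status should have settled on CANCELLED
    self.assertEqual(Execution.CANCELLED, execution.status)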
def wait_for_invocations(self, deployment_id, expected_count):
    def assertion():
        _invocations = self.get_plugin_data(
            plugin_name='testmockoperations',
            deployment_id=deployment_id)['mock_operation_invocation']
        self.assertEqual(expected_count, len(_invocations))
    utils.do_retries(assertion)
    invocations = self.get_plugin_data(
        plugin_name='testmockoperations',
        deployment_id=deployment_id)['mock_operation_invocation']
    return invocations
def wait_for_invocations(self, deployment_id, expected_count):
    def assertion():
        invocations = self.get_plugin_data(
            plugin_name="testmockoperations",
            deployment_id=deployment_id
        )["mock_operation_invocation"]
        self.assertEqual(expected_count, len(invocations))
        return invocations
    return utils.do_retries(assertion)
def test_workflow_parameters_pass_from_blueprint(self):
    dsl_path = resource('dsl/workflow_parameters.yaml')
    _id = uuid.uuid1()
    blueprint_id = 'blueprint_{0}'.format(_id)
    deployment_id = 'deployment_{0}'.format(_id)
    self.client.blueprints.upload(dsl_path, blueprint_id)
    self.client.deployments.create(blueprint_id, deployment_id)
    do_retries(verify_deployment_environment_creation_complete, 30,
               deployment_id=deployment_id)
    execution = self.client.executions.start(deployment_id,
                                             'custom_execute_operation')
    wait_for_execution_to_end(execution)

    invocations = self.get_plugin_data(
        plugin_name='testmockoperations',
        deployment_id=deployment_id)['mock_operation_invocation']
    self.assertEqual(1, len(invocations))
    self.assertDictEqual(invocations[0], {'test_key': 'test_value'})
def test_workflow_parameters_pass_from_blueprint(self):
    dsl_path = resource('dsl/workflow_parameters.yaml')
    _id = uuid.uuid1()
    blueprint_id = 'blueprint_{0}'.format(_id)
    deployment_id = 'deployment_{0}'.format(_id)
    self.client.blueprints.upload(dsl_path, blueprint_id)
    self.client.deployments.create(blueprint_id, deployment_id)
    do_retries(verify_deployment_environment_creation_complete, 30,
               deployment_id=deployment_id)
    execution = self.client.executions.start(deployment_id,
                                             'execute_operation')
    wait_for_execution_to_end(execution)

    invocations = self.get_plugin_data(
        plugin_name='testmockoperations',
        deployment_id=deployment_id)['mock_operation_invocation']
    self.assertEqual(1, len(invocations))
    self.assertDictEqual(invocations[0], {'test_key': 'test_value'})
def test_execution_parameters(self):
    dsl_path = resource('dsl/workflow_parameters.yaml')
    _id = uuid.uuid1()
    blueprint_id = 'blueprint_{0}'.format(_id)
    deployment_id = 'deployment_{0}'.format(_id)
    self.client.blueprints.upload(dsl_path, blueprint_id)
    self.client.deployments.create(blueprint_id, deployment_id)
    do_retries(verify_deployment_environment_creation_complete, 60,
               deployment_id=deployment_id)

    execution_parameters = {
        'operation': 'test_interface.operation',
        'properties': {
            'key': 'different-key',
            'value': 'different-value'
        },
        'custom-parameter': "doesn't matter"
    }
    execution = self.client.executions.start(
        deployment_id, 'another_execute_operation',
        parameters=execution_parameters,
        allow_custom_parameters=True)
    wait_for_execution_to_end(execution)

    invocations = self.get_plugin_data(
        plugin_name='testmockoperations',
        deployment_id=deployment_id)['mock_operation_invocation']
    self.assertEqual(1, len(invocations))
    self.assertDictEqual(invocations[0],
                         {'different-key': 'different-value'})

    # checking the execution parameters - expecting the parameters passed
    # to the execution to be merged with (and override) the workflow's
    # declared parameters.
    expected_params = {
        'node_id': 'test_node',
        'operation': 'test_interface.operation',
        'properties': {
            'key': 'different-key',
            'value': 'different-value'
        },
        'custom-parameter': "doesn't matter"
    }
    self.assertEqual(expected_params, execution.parameters)
def test_execution_parameters(self):
    dsl_path = resource('dsl/workflow_parameters.yaml')
    _id = uuid.uuid1()
    blueprint_id = 'blueprint_{0}'.format(_id)
    deployment_id = 'deployment_{0}'.format(_id)
    self.client.blueprints.upload(dsl_path, blueprint_id)
    self.client.deployments.create(blueprint_id, deployment_id)
    do_retries(verify_deployment_environment_creation_complete, 30,
               deployment_id=deployment_id)

    execution_parameters = {
        'operation': 'test_interface.operation',
        'properties': {
            'key': 'different-key',
            'value': 'different-value'
        },
        'custom-parameter': "doesn't matter"
    }
    execution = self.client.executions.start(
        deployment_id, 'another_execute_operation',
        parameters=execution_parameters,
        allow_custom_parameters=True)
    wait_for_execution_to_end(execution)

    invocations = self.get_plugin_data(
        plugin_name='testmockoperations',
        deployment_id=deployment_id)['mock_operation_invocation']
    self.assertEqual(1, len(invocations))
    self.assertDictEqual(invocations[0],
                         {'different-key': 'different-value'})

    # checking the execution parameters - expecting the parameters passed
    # to the execution to be merged with (and override) the workflow's
    # declared parameters.
    expected_params = {
        'node_id': 'test_node',
        'operation': 'test_interface.operation',
        'properties': {
            'key': 'different-key',
            'value': 'different-value'
        },
        'custom-parameter': "doesn't matter"
    }
    self.assertEqual(expected_params, execution.parameters)
def do_assertions(assertions_func, timeout=10, **kwargs):
    return utils.do_retries(assertions_func, timeout, AssertionError,
                            **kwargs)
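# Illustrative only: a minimal sketch of a retry helper in the spirit of
# utils.do_retries as it is used above (call a function repeatedly until it
# stops raising the given exception or a timeout elapses, then return its
# result). This is an assumption for readability, not the actual testenv
# implementation or its exact signature.
def _retry_until_success_sketch(func, timeout_seconds=10,
                                exception_class=BaseException, **kwargs):
    deadline = time.time() + timeout_seconds
    while True:
        try:
            return func(**kwargs)
        except exception_class:
            # re-raise the last failure once the deadline has passed
            if time.time() >= deadline:
                raise
            time.sleep(1)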
def test_delete_deployment(self):
    dsl_path = resource("dsl/basic.yaml")
    blueprint_id = self.id()
    deployment_id = str(uuid.uuid4())

    def change_execution_status(_execution_id, status):
        self.client.executions.update(_execution_id, status)
        time.sleep(2)  # waiting for elasticsearch to update...
        executions = self.client.executions.list(deployment_id)
        updated_execution = next(execution for execution in executions
                                 if execution.id == _execution_id)
        self.assertEqual(status, updated_execution.status)

    # verifying a deletion of a new deployment, i.e. one which hasn't
    # been installed yet, and therefore all its nodes are still in
    # 'uninitialized' state.
    self.client.blueprints.upload(dsl_path, blueprint_id)
    self.client.deployments.create(blueprint_id, deployment_id)
    do_retries(verify_deployment_environment_creation_complete, 30,
               deployment_id=deployment_id)

    self.client.deployments.delete(deployment_id, False)
    time.sleep(5)  # elasticsearch...
    self.client.blueprints.delete(blueprint_id)

    # recreating the deployment, this time actually deploying it too
    _, execution_id = deploy(dsl_path,
                             blueprint_id=blueprint_id,
                             deployment_id=deployment_id,
                             wait_for_execution=True)

    # the execution should already be 'terminated', but setting and
    # verifying it explicitly (elasticsearch might need time to update...)
    change_execution_status(execution_id, Execution.TERMINATED)

    # verifying deployment exists
    result = self.client.deployments.get(deployment_id)
    self.assertEqual(deployment_id, result.id)

    # retrieving deployment nodes
    nodes = self.client.node_instances.list(deployment_id=deployment_id)
    self.assertTrue(len(nodes) > 0)
    nodes_ids = [node.id for node in nodes]

    # setting one node's state to 'started' (making it a 'live' node)
    # node must be read using get in order for it to have a version.
    node = self.client.node_instances.get(nodes[0].id)
    self.client.node_instances.update(node.id,
                                      state='started',
                                      version=node.version)

    # setting the execution's status to 'started' so it'll prevent the
    # deployment deletion
    change_execution_status(execution_id, Execution.STARTED)

    # attempting to delete the deployment - should fail because the
    # execution is active
    try:
        self.client.deployments.delete(deployment_id)
        self.fail("Deleted deployment {0} successfully even though it "
                  "should have had a running execution"
                  .format(deployment_id))
    except CloudifyClientError as e:
        self.assertTrue('running executions' in str(e))
def test_delete_deployment(self):
    dsl_path = resource("dsl/basic.yaml")
    blueprint_id = self.id()
    deployment_id = str(uuid.uuid4())

    def change_execution_status(_execution_id, status):
        self.client.executions.update(_execution_id, status)
        executions = self.client.executions.list(deployment_id)
        updated_execution = next(execution for execution in executions
                                 if execution.id == _execution_id)
        self.assertEqual(status, updated_execution.status)

    # verifying a deletion of a new deployment, i.e. one which hasn't
    # been installed yet, and therefore all its nodes are still in
    # 'uninitialized' state.
    self.client.blueprints.upload(dsl_path, blueprint_id)
    self.client.deployments.create(blueprint_id, deployment_id)
    do_retries(verify_deployment_environment_creation_complete, 30,
               deployment_id=deployment_id)

    delete_deployment(deployment_id, False)
    self.client.blueprints.delete(blueprint_id)

    # recreating the deployment, this time actually deploying it too
    _, execution_id = deploy(dsl_path,
                             blueprint_id=blueprint_id,
                             deployment_id=deployment_id,
                             wait_for_execution=True)

    execs = self.client.executions.list(include_system_workflows=True)
    self.assertEqual(Execution.TERMINATED,
                     next(execution for execution in execs
                          if execution.id == execution_id).status)

    # verifying deployment exists
    result = self.client.deployments.get(deployment_id)
    self.assertEqual(deployment_id, result.id)

    # retrieving deployment nodes
    nodes = self.client.node_instances.list(deployment_id=deployment_id)
    self.assertTrue(len(nodes) > 0)

    # setting one node's state to 'started' (making it a 'live' node)
    # node must be read using get in order for it to have a version.
    node = self.client.node_instances.get(nodes[0].id)
    self.client.node_instances.update(node.id,
                                      state='started',
                                      version=node.version)

    # setting the execution's status to 'started' so it'll prevent the
    # deployment deletion
    change_execution_status(execution_id, Execution.STARTED)

    # attempting to delete the deployment - should fail because the
    # execution is active
    try:
        delete_deployment(deployment_id)
        self.fail("Deleted deployment {0} successfully even though it "
                  "should have had a running execution"
                  .format(deployment_id))
    except CloudifyClientError as e:
        self.assertTrue('running executions' in str(e))
def test_delete_deployment(self):
    dsl_path = get_resource("dsl/basic.yaml")
    blueprint_id = self.id()
    deployment_id = str(uuid.uuid4())

    def change_execution_status(execution_id, status):
        self.client.executions.update(execution_id, status)
        updated_execution = self.client.executions.get(execution_id)
        self.assertEqual(status, updated_execution.status)

    @contextmanager
    def client_error_check(expect_in_error_message, failer_message):
        try:
            yield
            self.fail(failer_message)
        except CloudifyClientError as exc:
            self.assertTrue(expect_in_error_message in str(exc))

    # verifying a deletion of a new deployment, i.e. one which hasn't
    # been installed yet, and therefore all its nodes are still in
    # 'uninitialized' state.
    self.client.blueprints.upload(dsl_path, blueprint_id)
    self.client.deployments.create(blueprint_id, deployment_id)
    do_retries(verify_deployment_environment_creation_complete,
               timeout_seconds=30,
               deployment_id=deployment_id)
    delete_deployment(deployment_id, ignore_live_nodes=False)
    self.client.blueprints.delete(blueprint_id)

    # recreating the deployment, this time actually deploying it too
    _, execution_id = deploy_application(
        dsl_path,
        blueprint_id=blueprint_id,
        deployment_id=deployment_id,
        wait_for_execution=True)

    execution = self.client.executions.get(execution_id)
    self.assertEqual(Execution.TERMINATED, execution.status)

    # verifying deployment exists
    deployment = self.client.deployments.get(deployment_id)
    self.assertEqual(deployment_id, deployment.id)

    # retrieving deployment nodes
    nodes = self.client.node_instances.list(deployment_id=deployment_id)
    self.assertTrue(len(nodes) > 0)

    # setting one node's state to 'started' (making it a 'live' node)
    # node must be read using get in order for it to have a version.
    node = self.client.node_instances.get(nodes[0].id)
    self.client.node_instances.update(
        node.id, state='started', version=node.version)

    modification = self.client.deployment_modifications.start(
        deployment_id, nodes={'webserver_host': {'instances': 2}})
    self.client.deployment_modifications.finish(modification.id)

    # get updated node instances list
    nodes = self.client.node_instances.list(deployment_id=deployment_id)
    self.assertTrue(len(nodes) > 0)
    nodes_ids = [_node.id for _node in nodes]

    # attempting to delete deployment - should fail because there are
    # live nodes for this deployment
    with client_error_check(
            failer_message='Deleted deployment {0} successfully even '
                           'though it should have had live nodes and the '
                           'ignore_live_nodes flag was set to False'
                           .format(deployment_id),
            expect_in_error_message='live nodes'):
        delete_deployment(deployment_id)

    # deleting deployment - this time there's no execution running,
    # and using the ignore_live_nodes parameter to force deletion
    deleted_deployment_id = delete_deployment(deployment_id, True).id
    self.assertEqual(deployment_id, deleted_deployment_id)

    # verifying the deployment no longer exists
    with client_error_check(
            failer_message="Got deployment {0} successfully even though "
                           "it wasn't expected to exist"
                           .format(deployment_id),
            expect_in_error_message='not found'):
        self.client.deployments.get(deployment_id)

    # verifying the deployment's execution no longer exists
    with client_error_check(
            failer_message='execution {0} still exists even though it '
                           'should have been deleted when its deployment '
                           'was deleted'.format(execution_id),
            expect_in_error_message='not found'):
        self.client.executions.get(execution_id)

    # verifying the deployment modification no longer exists
    with client_error_check(
            failer_message='deployment modification {0} still exists '
                           'even though it should have been deleted when '
                           'its deployment was deleted'
                           .format(modification.id),
            expect_in_error_message='not found'):
        self.client.deployment_modifications.get(modification.id)

    # verifying the deployment's nodes no longer exist
    for node_id in nodes_ids:
        with client_error_check(
                failer_message='node {0} still exists even though it '
                               'should have been deleted when its '
                               'deployment was deleted'.format(node_id),
                expect_in_error_message='not found'):
            self.client.node_instances.get(node_id)

    # trying to delete a nonexistent deployment
    with client_error_check(
            failer_message="Deleted deployment {0} successfully even "
                           "though it wasn't expected to exist"
                           .format(deployment_id),
            expect_in_error_message='not found'):
        delete_deployment(deployment_id)