def test_schedule_execution_while_execution_running_under_same_dep(self):
    """
    Start an execution and while it is running schedule an execution
    for the future, under the same deployment.
    """
    dsl_path = resource('dsl/sleep_workflows.yaml')
    dep = self.deploy(dsl_path, wait=False, client=self.client)
    dep_id = dep.id
    do_retries(verify_deployment_env_created, 30,
               container_id=self.env.container_id,
               deployment_id=dep_id,
               client=self.client)
    execution1 = self.client.executions.start(deployment_id=dep_id,
                                              workflow_id='install')
    self._wait_for_exec_to_end_and_modify_status(execution1,
                                                 Execution.STARTED)
    scheduled_time = generate_scheduled_for_date()
    self.client.executions.start(deployment_id=dep_id,
                                 workflow_id='install',
                                 schedule=scheduled_time)
    self.client.executions.update(execution1.id, Execution.TERMINATED)
    self.wait_for_scheduled_execution_to_fire(dep_id)
    schedule = self.client.execution_schedules.list(
        deployment_id=dep.id)[0]
    self.client.execution_schedules.delete(schedule.id, dep_id)

def test_two_scheduled_execution_same_tenant(self):
    """
    Schedule 2 executions to start a second apart.
    """
    dsl_path = resource('dsl/basic.yaml')
    dep1 = self.deploy(dsl_path, wait=False, client=self.client)
    dep2 = self.deploy(dsl_path, wait=False, client=self.client)
    dep1_id = dep1.id
    dep2_id = dep2.id
    do_retries(verify_deployment_env_created, 30,
               container_id=self.env.container_id,
               deployment_id=dep1_id,
               client=self.client)
    do_retries(verify_deployment_env_created, 30,
               container_id=self.env.container_id,
               deployment_id=dep2_id,
               client=self.client)
    scheduled_time = generate_scheduled_for_date()
    self.client.executions.start(deployment_id=dep1_id,
                                 workflow_id='install',
                                 schedule=scheduled_time)
    self.client.executions.start(deployment_id=dep2_id,
                                 workflow_id='install',
                                 schedule=scheduled_time)
    self.wait_for_scheduled_execution_to_fire(dep1_id)
    self.wait_for_scheduled_execution_to_fire(dep2_id)
    schedule1 = self.client.execution_schedules.list(
        deployment_id=dep1.id)[0]
    schedule2 = self.client.execution_schedules.list(
        deployment_id=dep2.id)[0]
    self.client.execution_schedules.delete(schedule1.id, dep1_id)
    self.client.execution_schedules.delete(schedule2.id, dep2_id)

def test_schedule_execution_and_create_snapshot_same_tenant(self):
    """
    Schedule an execution, then create a snapshot. The execution
    'wakes up' while the snapshot is still running, so it becomes
    'queued' and starts when the snapshot terminates.
    """
    dsl_path = resource('dsl/sleep_workflows.yaml')
    dep = self.deploy(dsl_path, wait=False, client=self.client)
    dep_id = dep.id
    do_retries(verify_deployment_env_created, 30,
               container_id=self.env.container_id,
               deployment_id=dep_id,
               client=self.client)
    # Create a snapshot and keep its status 'started'
    snapshot = self._create_snapshot_and_modify_execution_status(
        Execution.STARTED)
    scheduled_time = generate_scheduled_for_date()
    self.client.executions.start(deployment_id=dep_id,
                                 workflow_id='install',
                                 schedule=scheduled_time)
    execution = self.wait_for_scheduled_execution_to_fire(dep_id)
    self._assert_execution_status(execution.id, Execution.QUEUED)
    self.client.executions.update(snapshot.id, Execution.TERMINATED)
    self.wait_for_execution_to_end(execution)
    schedule = self.client.execution_schedules.list(
        deployment_id=dep.id)[0]
    self.client.execution_schedules.delete(schedule.id, dep_id)

def _execute_from_resource(self, workflow_id, workflow_params=None,
                           resource_file=None):
    dsl_path = resource(resource_file)
    _id = uuid.uuid1()
    blueprint_id = 'blueprint_{0}'.format(_id)
    deployment_id = 'deployment_{0}'.format(_id)
    self.client.blueprints.upload(dsl_path, blueprint_id)
    wait_for_blueprint_upload(blueprint_id, self.client, True)
    self.client.deployments.create(blueprint_id, deployment_id,
                                   skip_plugins_validation=True)
    do_retries(verify_deployment_env_created, 30,
               container_id=self.env.container_id,
               deployment_id=deployment_id,
               client=self.client)
    execution = self.client.executions.start(deployment_id, workflow_id,
                                             parameters=workflow_params)
    node_inst_id = self.client.node_instances.list(
        deployment_id=deployment_id)[0].id
    return execution, node_inst_id, deployment_id

def test_schedule_execution_while_execution_running_under_same_dep(self):
    """
    Start an execution and while it is running schedule an execution
    for the future, under the same deployment.
    """
    # The token in the container is invalid; create a new valid one
    create_api_token()
    dsl_path = resource('dsl/sleep_workflows.yaml')
    dep = self.deploy(dsl_path, wait=False, client=self.client)
    dep_id = dep.id
    do_retries(verify_deployment_env_created, 30, deployment_id=dep_id)
    execution1 = self.client.executions.start(deployment_id=dep_id,
                                              workflow_id='install')
    self._wait_for_exec_to_end_and_modify_status(execution1,
                                                 Execution.STARTED)
    scheduled_time = generate_scheduled_for_date()
    execution2 = self.client.executions.start(deployment_id=dep_id,
                                              workflow_id='install',
                                              schedule=scheduled_time)
    self._assert_execution_status(execution2.id, Execution.SCHEDULED)
    self.client.executions.update(execution1.id, Execution.TERMINATED)
    time.sleep(62)  # Wait for the execution to 'wake up'
    self.wait_for_execution_to_end(execution2)

def test_execute_and_kill_execution(self):
    """
    Tests the kill-execution option by asserting that the execution's
    pid no longer exists.
    """
    dsl_path = resource('dsl/write_pid_node.yaml')
    dep = self.deploy(dsl_path, wait=False, client=self.client)
    do_retries(verify_deployment_env_created, 30,
               container_id=self.env.container_id,
               deployment_id=dep.id,
               client=self.client)
    execution = self.client.executions.start(deployment_id=dep.id,
                                             workflow_id='install')
    pid = do_retries(self.read_manager_file, timeout_seconds=60,
                     file_path='/tmp/pid.txt')
    path = '/proc/{}/status'.format(pid)
    execution = self.client.executions.cancel(execution.id,
                                              force=True, kill=True)
    self.assertEqual(Execution.KILL_CANCELLING, execution.status)
    # If the process is still running, self.read_manager_file will
    # raise an error. We use do_retries to give the kill-cancel
    # operation time to kill the process.
    do_retries(self.assertRaises,
               expected_exception=subprocess.CalledProcessError,
               callableObj=self.read_manager_file,
               file_path=path)

def _execute_and_cancel_execution(self, force=False, kill_cancel=False,
                                  wait_for_component=True,
                                  verify_intermediate_state=True):
    # The component's blueprint
    sleep_blueprint = resource('dsl/sleep_node.yaml')
    self.client.blueprints.upload(sleep_blueprint, entity_id='basic')
    main_blueprint = resource('dsl/component_with_blueprint_id.yaml')
    test_id = uuid.uuid1()
    blueprint_id = 'blueprint_{0}'.format(test_id)
    deployment_id = 'deployment_{0}'.format(test_id)
    self.client.blueprints.upload(main_blueprint, blueprint_id)
    self.client.deployments.create(blueprint_id, deployment_id,
                                   skip_plugins_validation=True)
    do_retries(verify_deployment_env_created, 30,
               deployment_id=deployment_id)
    execution = self.client.executions.start(deployment_id, 'install')
    if wait_for_component:
        self._wait_for_component_install('component')
    execution = self.client.executions.cancel(execution.id, force,
                                              kill=kill_cancel)
    self._verify_cancel_install_execution(execution, force, kill_cancel,
                                          verify_intermediate_state)
    return execution

def test_schedule_execution_and_create_snapshot_same_tenant(self):
    """
    Schedule an execution, then create a snapshot. The execution
    'wakes up' while the snapshot is still running, so it becomes
    'queued' and starts when the snapshot terminates.
    """
    # The token in the container is invalid; create a new valid one
    create_api_token()
    dsl_path = resource('dsl/sleep_workflows.yaml')
    dep = self.deploy(dsl_path, wait=False, client=self.client)
    dep_id = dep.id
    do_retries(verify_deployment_env_created, 30, deployment_id=dep_id)
    scheduled_time = generate_scheduled_for_date()
    execution = self.client.executions.start(deployment_id=dep_id,
                                             workflow_id='install',
                                             schedule=scheduled_time)
    self._assert_execution_status(execution.id, Execution.SCHEDULED)
    # Create a snapshot and keep its status 'started'
    snapshot = self._create_snapshot_and_modify_execution_status(
        Execution.STARTED)
    time.sleep(62)  # Wait for the execution to 'wake up'
    self._assert_execution_status(execution.id, Execution.QUEUED)
    self.client.executions.update(snapshot.id, Execution.TERMINATED)
    self.wait_for_execution_to_end(execution)

def test_two_scheduled_execution_same_tenant(self):
    """
    Schedule 2 executions to start a second apart.
    """
    # The token in the container is invalid; create a new valid one
    create_api_token()
    dsl_path = resource('dsl/basic.yaml')
    dep1 = self.deploy(dsl_path, wait=False, client=self.client)
    dep2 = self.deploy(dsl_path, wait=False, client=self.client)
    dep1_id = dep1.id
    dep2_id = dep2.id
    do_retries(verify_deployment_env_created, 30, deployment_id=dep1_id)
    do_retries(verify_deployment_env_created, 30, deployment_id=dep2_id)
    scheduled_time = generate_scheduled_for_date()
    execution1 = self.client.executions.start(deployment_id=dep1_id,
                                              workflow_id='install',
                                              schedule=scheduled_time)
    execution2 = self.client.executions.start(deployment_id=dep2_id,
                                              workflow_id='install',
                                              schedule=scheduled_time)
    self._assert_execution_status(execution1.id, Execution.SCHEDULED)
    self._assert_execution_status(execution2.id, Execution.SCHEDULED)
    time.sleep(62)  # Wait for the executions to 'wake up'
    self.wait_for_execution_to_end(execution1)
    self.wait_for_execution_to_end(execution2)

def test_workflow_parameters_pass_from_blueprint(self):
    dsl_path = resource('dsl/workflow_parameters.yaml')
    _id = uuid.uuid1()
    blueprint_id = 'blueprint_{0}'.format(_id)
    deployment_id = 'deployment_{0}'.format(_id)
    self.client.blueprints.upload(dsl_path, blueprint_id)
    wait_for_blueprint_upload(blueprint_id, self.client)
    self.client.deployments.create(blueprint_id, deployment_id,
                                   skip_plugins_validation=True)
    do_retries(verify_deployment_env_created, 30,
               container_id=self.env.container_id,
               client=self.client,
               deployment_id=deployment_id)
    execution = self.client.executions.start(deployment_id,
                                             'custom_execute_operation')
    self.wait_for_execution_to_end(execution)
    node_id = self.client.node_instances.list(
        deployment_id=deployment_id)[0].id
    node_instance = self.client.node_instances.get(node_id)
    invocations = node_instance.runtime_properties[
        'mock_operation_invocation']
    self.assertEqual(1, len(invocations))
    self.assertDictEqual(invocations[0], {'test_key': 'test_value'})

def _verify_cancel_install_execution(self, execution, force, kill_cancel,
                                     verify_intermediate_state=True):
    expected_status = Execution.CANCELLING
    if force:
        expected_status = Execution.FORCE_CANCELLING
    elif kill_cancel:
        expected_status = Execution.KILL_CANCELLING
    if verify_intermediate_state:
        do_retries(
            self.assertEqual,
            first=expected_status,
            second=execution.status,
        )
    executions = self.client.executions.list(workflow_id='install')
    for execution in executions:
        self.wait_for_execution_to_end(execution)
    # Assert that everything finished in the correct state
    executions = self.client.executions.list(workflow_id='install')
    for execution in executions:
        self.assertEqual(Execution.CANCELLED, execution.status)
    return executions

def test_schedule_execution_and_create_snapshot_same_tenant(self):
    """
    Schedule an execution, then create a snapshot. The execution
    'wakes up' while the snapshot is still running, so it becomes
    'queued' and starts when the snapshot terminates.
    """
    # The token in the container is invalid; create a new valid one
    create_api_token()
    dsl_path = resource('dsl/sleep_workflows.yaml')
    dep = self.deploy(dsl_path, wait=False, client=self.client)
    dep_id = dep.id
    do_retries(verify_deployment_env_created, 30, deployment_id=dep_id)
    # Create a snapshot and keep its status 'started'
    snapshot = self._create_snapshot_and_modify_execution_status(
        Execution.STARTED)
    scheduled_time = generate_scheduled_for_date()
    execution = self.client.executions.start(deployment_id=dep_id,
                                             workflow_id='install',
                                             schedule=scheduled_time)
    self._assert_execution_status(execution.id, Execution.SCHEDULED)
    time.sleep(62)  # Wait for the execution to 'wake up'
    self._assert_execution_status(execution.id, Execution.QUEUED)
    self.client.executions.update(snapshot.id, Execution.TERMINATED)
    self.wait_for_execution_to_end(execution)

def wait_for_deployment_environment(self, deployment_id):
    do_retries(
        verify_deployment_env_created,
        container_id=self.env.container_id,
        deployment_id=deployment_id,
        client=self.client,
        timeout_seconds=60
    )

def _wait_for_component_deployment(self, deployment_id, client=None,
                                   timeout_seconds=60):
    # Wait for the component to create its deployment
    client = client or self.client
    do_retries(verify_deployment_env_created,
               container_id=self.env.container_id,
               deployment_id=deployment_id,
               client=client,
               timeout_seconds=timeout_seconds)

def test_execution_parameters(self):
    dsl_path = resource('dsl/workflow_parameters.yaml')
    _id = uuid.uuid1()
    blueprint_id = 'blueprint_{0}'.format(_id)
    deployment_id = 'deployment_{0}'.format(_id)
    self.client.blueprints.upload(dsl_path, blueprint_id)
    wait_for_blueprint_upload(blueprint_id, self.client, True)
    self.client.deployments.create(blueprint_id, deployment_id,
                                   skip_plugins_validation=True)
    do_retries(verify_deployment_env_created, 60,
               container_id=self.env.container_id,
               deployment_id=deployment_id,
               client=self.client)
    execution_parameters = {
        'operation': 'test_interface.operation',
        'properties': {
            'key': 'different-key',
            'value': 'different-value'
        },
        'custom-parameter': "doesn't matter"
    }
    execution = self.client.executions.start(
        deployment_id, 'another_execute_operation',
        parameters=execution_parameters,
        allow_custom_parameters=True)
    self.wait_for_execution_to_end(execution)
    invocations = self.get_runtime_property(
        deployment_id, 'mock_operation_invocation')[0]
    self.assertEqual(1, len(invocations))
    self.assertDictEqual(invocations[0],
                         {'different-key': 'different-value'})
    # Check the execution parameters: the passed parameters are
    # expected to be merged with, and override, the workflow parameters.
    expected_params = {
        'node_id': 'test_node',
        'operation': 'test_interface.operation',
        'properties': {
            'key': 'different-key',
            'value': 'different-value'
        },
        'custom-parameter': "doesn't matter"
    }
    self.assertEqual(expected_params, execution.parameters)

def _wait_for_component_install(self, deployment_id):
    # Wait for the component to create its deployment
    do_retries(verify_deployment_env_created, 30,
               deployment_id=deployment_id)
    # Wait for the sleep node to start
    node_instance_id = self.client.node_instances.list(
        deployment_id=deployment_id)[0].id
    for retry in range(30):
        if self.client.node_instances.get(
                node_instance_id).state == 'creating':
            break
        time.sleep(1)
    else:
        raise RuntimeError("sleep node instance was expected to go"
                           " into 'creating' status")

def test_hello_world(self):
    blueprint_file = self._prepare_hello_world()
    deployment, _ = self.test_case.deploy_application(
        blueprint_file, timeout_seconds=120)
    self._assert_hello_world_events(deployment.id)
    ip = self.test_case.get_host_ip(node_id='vm',
                                    deployment_id=deployment.id)
    url = 'http://{0}:8080'.format(ip)

    def _webserver_request():
        return requests.get(url, timeout=1)

    # Assert the webserver is running
    response = test_utils.do_retries(
        _webserver_request,
        exception_class=requests.exceptions.ConnectionError)
    self.test_case.assertIn('http_web_server', response.text)
    if not self.skip_uninstall:
        self.test_case.undeploy_application(deployment.id)
        # Assert the webserver is no longer running
        self.test_case.assertRaises(requests.exceptions.ConnectionError,
                                    _webserver_request)
    return deployment

def test_hello_world(self):
    blueprint_file = self._prepare_hello_world()
    deployment, _ = self.test_case.deploy_application(
        blueprint_file, timeout_seconds=120)
    self._assert_hello_world_events(deployment.id)
    self._assert_hello_world_metric(deployment.id)
    ip = self.test_case.get_host_ip(node_id='vm',
                                    deployment_id=deployment.id)
    url = 'http://{0}:8080'.format(ip)

    def _webserver_request():
        return requests.get(url, timeout=1)

    # Assert the webserver is running
    response = test_utils.do_retries(
        _webserver_request,
        exception_class=requests.exceptions.ConnectionError)
    self.test_case.assertIn('http_web_server', response.text)
    if not self.skip_uninstall:
        self.test_case.undeploy_application(deployment.id)
        # Assert the webserver is no longer running
        self.test_case.assertRaises(requests.exceptions.ConnectionError,
                                    _webserver_request)
    return deployment

def _execute_from_resource(self, workflow_id, workflow_params=None,
                           resource_file=None):
    dsl_path = resource(resource_file)
    _id = uuid.uuid1()
    blueprint_id = 'blueprint_{0}'.format(_id)
    deployment_id = 'deployment_{0}'.format(_id)
    self.client.blueprints.upload(dsl_path, blueprint_id)
    self.client.deployments.create(blueprint_id, deployment_id,
                                   skip_plugins_validation=True)
    do_retries(verify_deployment_env_created, 30,
               deployment_id=deployment_id)
    execution = self.client.executions.start(
        deployment_id, workflow_id, parameters=workflow_params)
    node_inst_id = self.client.node_instances.list(
        deployment_id=deployment_id)[0].id
    return execution, node_inst_id, deployment_id

def test_scheduled_execution(self):
    # The token in the container is invalid; create a new valid one
    create_api_token()
    dsl_path = resource('dsl/basic.yaml')
    dep = self.deploy(dsl_path, wait=False, client=self.client)
    dep_id = dep.id
    do_retries(verify_deployment_env_created, 30, deployment_id=dep_id)
    scheduled_time = generate_scheduled_for_date()
    execution = self.client.executions.start(deployment_id=dep_id,
                                             workflow_id='install',
                                             schedule=scheduled_time)
    self.assertEqual(Execution.SCHEDULED, execution.status)
    time.sleep(62)  # Wait for the execution to 'wake up'
    self.wait_for_execution_to_end(execution)

def wait_for_invocations(self, deployment_id, expected_count):
    def assertion():
        invocations = self.get_plugin_data(
            plugin_name='testmockoperations',
            deployment_id=deployment_id
        )['mock_operation_invocation']
        self.assertEqual(expected_count, len(invocations))
        return invocations

    return do_retries(assertion)

def test_execution_parameters(self):
    dsl_path = resource('dsl/workflow_parameters.yaml')
    _id = uuid.uuid1()
    blueprint_id = 'blueprint_{0}'.format(_id)
    deployment_id = 'deployment_{0}'.format(_id)
    self.client.blueprints.upload(dsl_path, blueprint_id)
    self.client.deployments.create(blueprint_id, deployment_id,
                                   skip_plugins_validation=True)
    do_retries(verify_deployment_env_created, 60,
               deployment_id=deployment_id)
    execution_parameters = {
        'operation': 'test_interface.operation',
        'properties': {
            'key': 'different-key',
            'value': 'different-value'
        },
        'custom-parameter': "doesn't matter"
    }
    execution = self.client.executions.start(
        deployment_id, 'another_execute_operation',
        parameters=execution_parameters,
        allow_custom_parameters=True)
    self.wait_for_execution_to_end(execution)
    invocations = self.get_plugin_data(
        plugin_name='testmockoperations',
        deployment_id=deployment_id
    )['mock_operation_invocation']
    self.assertEqual(1, len(invocations))
    self.assertDictEqual(invocations[0],
                         {'different-key': 'different-value'})
    # Check the execution parameters: the passed parameters are
    # expected to be merged with, and override, the workflow parameters.
    expected_params = {
        'node_id': 'test_node',
        'operation': 'test_interface.operation',
        'properties': {
            'key': 'different-key',
            'value': 'different-value'
        },
        'custom-parameter': "doesn't matter"
    }
    self.assertEqual(expected_params, execution.parameters)

def test_workflow_parameters_pass_from_blueprint(self):
    dsl_path = resource('dsl/workflow_parameters.yaml')
    _id = uuid.uuid1()
    blueprint_id = 'blueprint_{0}'.format(_id)
    deployment_id = 'deployment_{0}'.format(_id)
    self.client.blueprints.upload(dsl_path, blueprint_id)
    self.client.deployments.create(blueprint_id, deployment_id)
    do_retries(verify_deployment_environment_creation_complete, 30,
               deployment_id=deployment_id)
    execution = self.client.executions.start(deployment_id,
                                             'custom_execute_operation')
    self.wait_for_execution_to_end(execution)
    invocations = self.get_plugin_data(
        plugin_name='testmockoperations',
        deployment_id=deployment_id)['mock_operation_invocation']
    self.assertEqual(1, len(invocations))
    self.assertDictEqual(invocations[0], {'test_key': 'test_value'})

def _execute_and_cancel_execution(self, workflow_id, force=False,
                                  wait_for_termination=True,
                                  is_wait_for_asleep_node=True,
                                  workflow_params=None):
    dsl_path = resource('dsl/sleep_workflows.yaml')
    _id = uuid.uuid1()
    blueprint_id = 'blueprint_{0}'.format(_id)
    deployment_id = 'deployment_{0}'.format(_id)
    self.client.blueprints.upload(dsl_path, blueprint_id)
    self.client.deployments.create(blueprint_id, deployment_id,
                                   skip_plugins_validation=True)
    do_retries(verify_deployment_environment_creation_complete, 30,
               deployment_id=deployment_id)
    execution = self.client.executions.start(deployment_id, workflow_id,
                                             parameters=workflow_params)
    node_inst_id = self.client.node_instances.list(
        deployment_id=deployment_id)[0].id
    if is_wait_for_asleep_node:
        for retry in range(30):
            if self.client.node_instances.get(
                    node_inst_id).state == 'asleep':
                break
            time.sleep(1)
        else:
            raise RuntimeError("Execution was expected to go"
                               " into 'sleeping' status")
    execution = self.client.executions.cancel(execution.id, force)
    expected_status = Execution.FORCE_CANCELLING if force else \
        Execution.CANCELLING
    self.assertEqual(expected_status, execution.status)
    if wait_for_termination:
        self.wait_for_execution_to_end(execution)
        execution = self.client.executions.get(execution.id)
    return execution, deployment_id

def test_workflow_parameters_pass_from_blueprint(self):
    dsl_path = resource('dsl/workflow_parameters.yaml')
    _id = uuid.uuid1()
    blueprint_id = 'blueprint_{0}'.format(_id)
    deployment_id = 'deployment_{0}'.format(_id)
    self.client.blueprints.upload(dsl_path, blueprint_id)
    self.client.deployments.create(blueprint_id, deployment_id,
                                   skip_plugins_validation=True)
    do_retries(verify_deployment_env_created, 30,
               deployment_id=deployment_id)
    execution = self.client.executions.start(deployment_id,
                                             'custom_execute_operation')
    self.wait_for_execution_to_end(execution)
    invocations = self.get_plugin_data(
        plugin_name='testmockoperations',
        deployment_id=deployment_id
    )['mock_operation_invocation']
    self.assertEqual(1, len(invocations))
    self.assertDictEqual(invocations[0], {'test_key': 'test_value'})

def test_execute_and_kill_execution(self):
    """
    Tests the kill-execution option by asserting that the execution's
    pid no longer exists.
    """
    dsl_path = resource('dsl/write_pid_node.yaml')
    dep = self.deploy(dsl_path, wait=False, client=self.client)
    do_retries(verify_deployment_env_created, 30, deployment_id=dep.id)
    execution = self.client.executions.start(deployment_id=dep.id,
                                             workflow_id='install')
    pid = do_retries(docl.read_file, file_path='/tmp/pid.txt')
    path = '/proc/{}/status'.format(pid)
    execution = self.client.executions.cancel(execution.id,
                                              force=True, kill=True)
    self.assertEqual(Execution.KILL_CANCELLING, execution.status)
    # If the process is still running, docl.read_file will raise an
    # error. We use do_retries to give the kill-cancel operation time
    # to kill the process.
    do_retries(self.assertRaises,
               excClass=ErrorReturnCode,
               callableObj=docl.read_file,
               file_path=path)

def test_scheduled_execution(self):
    dsl_path = resource('dsl/basic.yaml')
    dep = self.deploy(dsl_path, wait=False, client=self.client)
    dep_id = dep.id
    do_retries(verify_deployment_env_created, 30,
               container_id=self.env.container_id,
               deployment_id=dep_id,
               client=self.client)
    scheduled_time = generate_scheduled_for_date()
    self.client.executions.start(deployment_id=dep_id,
                                 workflow_id='install',
                                 schedule=scheduled_time)
    schedule = self.client.execution_schedules.list(
        deployment_id=dep.id)[0]
    self.assertEqual(schedule.workflow_id, 'install')
    self.assertIn('install_', schedule.id)
    self.wait_for_scheduled_execution_to_fire(dep_id)
    self.client.execution_schedules.delete(schedule.id, dep_id)

def test_three_level_cascading_cancel_and_resume(self):
    """
    This test does not wait for the components to be created, so the
    resume operation will complete the install.
    """
    dsl_path = resource('dsl/sleep_node.yaml')
    self.client.blueprints.upload(dsl_path, entity_id='sleep')
    layer_1 = """
tosca_definitions_version: cloudify_dsl_1_3

imports:
  - cloudify/types/types.yaml

node_templates:
  component_node:
    type: cloudify.nodes.Component
    properties:
      resource_config:
        blueprint:
          external_resource: true
          id: sleep
        deployment:
          id: sleep_component
"""
    layer_1_path = self.make_yaml_file(layer_1)
    self.client.blueprints.upload(layer_1_path, entity_id='layer_1')
    layer_2 = """
tosca_definitions_version: cloudify_dsl_1_3

imports:
  - cloudify/types/types.yaml

node_templates:
  component_node:
    type: cloudify.nodes.Component
    properties:
      resource_config:
        blueprint:
          external_resource: true
          id: layer_1
        deployment:
          id: component
"""
    layer_2_path = self.make_yaml_file(layer_2)
    test_id = uuid.uuid1()
    blueprint_id = 'blueprint_{0}'.format(test_id)
    deployment_id = 'deployment_{0}'.format(test_id)
    self.client.blueprints.upload(layer_2_path, blueprint_id)
    self.client.deployments.create(blueprint_id, deployment_id,
                                   skip_plugins_validation=True)
    do_retries(verify_deployment_env_created, 30,
               deployment_id=deployment_id)
    main_execution = self.client.executions.start(deployment_id,
                                                  'install')
    main_execution = self.client.executions.cancel(main_execution.id)
    self._verify_cancel_install_execution(main_execution, False, False)
    self._resume_and_verify_executions_end(main_execution, 3)

def test_delete_deployment(self):
    dsl_path = get_resource("dsl/basic.yaml")
    blueprint_id = self.id()
    deployment_id = str(uuid.uuid4())

    def change_execution_status(execution_id, status):
        self.client.executions.update(execution_id, status)
        updated_execution = self.client.executions.get(deployment_id)
        self.assertEqual(status, updated_execution.status)

    @contextmanager
    def client_error_check(expect_in_error_message, failer_message):
        try:
            yield
            self.fail(failer_message)
        except CloudifyClientError as exc:
            self.assertTrue(expect_in_error_message in str(exc))

    # Verify deletion of a new deployment, i.e. one which hasn't been
    # installed yet, and therefore all its nodes are still in the
    # 'uninitialized' state.
    self.client.blueprints.upload(dsl_path, blueprint_id)
    self.client.deployments.create(blueprint_id, deployment_id)
    do_retries(verify_deployment_environment_creation_complete,
               timeout_seconds=30,
               deployment_id=deployment_id)
    self.delete_deployment(deployment_id, ignore_live_nodes=False)
    self.client.blueprints.delete(blueprint_id)

    # Recreate the deployment, this time actually deploying it too
    _, execution_id = self.deploy_application(
        dsl_path,
        blueprint_id=blueprint_id,
        deployment_id=deployment_id,
        wait_for_execution=True)
    execution = self.client.executions.get(execution_id)
    self.assertEqual(Execution.TERMINATED, execution.status)

    # Verify the deployment exists
    deployment = self.client.deployments.get(deployment_id)
    self.assertEqual(deployment_id, deployment.id)

    # Retrieve the deployment's nodes
    nodes = self.client.node_instances.list(deployment_id=deployment_id)
    self.assertTrue(len(nodes) > 0)

    # Set one node's state to 'started' (making it a 'live' node).
    # The node must be read using get in order for it to have a version.
    node = self.client.node_instances.get(nodes[0].id)
    self.client.node_instances.update(node.id,
                                      state='started',
                                      version=node.version)

    modification = self.client.deployment_modifications.start(
        deployment_id,
        nodes={'webserver_host': {'instances': 2}})
    self.client.deployment_modifications.finish(modification.id)

    # Get the updated node instances list
    nodes = self.client.node_instances.list(deployment_id=deployment_id)
    self.assertTrue(len(nodes) > 0)
    nodes_ids = [_node.id for _node in nodes]

    # Attempt to delete the deployment - this should fail because
    # there are live nodes for this deployment
    with client_error_check(
            failer_message='Deleted deployment {0} successfully even '
                           'though it should have had live nodes and '
                           'the ignore_live_nodes flag was set to '
                           'False'.format(deployment_id),
            expect_in_error_message='live nodes'):
        self.delete_deployment(deployment_id)

    # Delete the deployment - this time there's no execution running,
    # and the ignore_live_nodes parameter is used to force deletion
    deleted_deployment_id = self.delete_deployment(deployment_id,
                                                   True).id
    self.assertEqual(deployment_id, deleted_deployment_id)

    # Verify the deployment no longer exists
    with client_error_check(
            failer_message="Got deployment {0} successfully even "
                           "though it wasn't expected to "
                           "exist".format(deployment_id),
            expect_in_error_message='not found'):
        self.client.deployments.get(deployment_id)

    # Verify the deployment's execution no longer exists
    with client_error_check(
            failer_message='execution {0} still exists even though it '
                           'should have been deleted when its '
                           'deployment was deleted'.format(execution_id),
            expect_in_error_message='not found'):
        self.client.executions.get(execution_id)

    # Verify the deployment modification no longer exists
    with client_error_check(
            failer_message='deployment modification {0} still exists '
                           'even though it should have been deleted '
                           'when its deployment was deleted',
            expect_in_error_message='not found'):
        self.client.deployment_modifications.get(modification.id)

    # Verify the deployment's nodes no longer exist
    for node_id in nodes_ids:
        with client_error_check(
                failer_message='node {0} still exists even though it '
                               'should have been deleted when its '
                               'deployment was deleted'.format(node_id),
                expect_in_error_message='not found'):
            self.client.node_instances.get(node_id)

    # Try to delete a nonexistent deployment
    with client_error_check(
            failer_message="Deleted deployment {0} successfully even "
                           "though it wasn't expected to "
                           "exist".format(deployment_id),
            expect_in_error_message='not found'):
        self.delete_deployment(deployment_id)

def test_three_level_cascading_cancel(self):
    sleep = resource('dsl/sleep_node.yaml')
    self.client.blueprints.upload(sleep, entity_id='sleep')
    layer_1 = """
tosca_definitions_version: cloudify_dsl_1_3

imports:
  - cloudify/types/types.yaml

node_templates:
  component_node:
    type: cloudify.nodes.Component
    properties:
      resource_config:
        blueprint:
          external_resource: true
          id: sleep
        deployment:
          id: sleep_component
"""
    layer_1_path = self.make_yaml_file(layer_1)
    self.client.blueprints.upload(layer_1_path, entity_id='layer_1')
    layer_2 = """
tosca_definitions_version: cloudify_dsl_1_3

imports:
  - cloudify/types/types.yaml

node_templates:
  component_node:
    type: cloudify.nodes.Component
    properties:
      resource_config:
        blueprint:
          external_resource: true
          id: layer_1
        deployment:
          id: component
"""
    layer_2_path = self.make_yaml_file(layer_2)
    test_id = uuid.uuid1()
    blueprint_id = 'blueprint_{0}'.format(test_id)
    deployment_id = 'deployment_{0}'.format(test_id)
    self.client.blueprints.upload(layer_2_path, blueprint_id)
    self.client.deployments.create(blueprint_id, deployment_id,
                                   skip_plugins_validation=True)
    do_retries(verify_deployment_env_created, 30,
               deployment_id=deployment_id)
    main_execution = self.client.executions.start(deployment_id,
                                                  'install')
    self._wait_for_component_install(deployment_id='component')
    main_execution = self.client.executions.cancel(main_execution.id)
    executions = self._verify_cancel_install_execution(main_execution,
                                                       False, False)
    # The number of executions depends on when the cancel occurred
    self.assertLessEqual(len(executions), 3)

def do_assertions(assertions_func, timeout=10, **kwargs):
    return test_utils.do_retries(assertions_func, timeout,
                                 AssertionError, **kwargs)

def get_node_count(url):
    return test_utils.do_retries(
        get_node_count_impl, url=url, timeout_seconds=60)