def test_schedule_execution_and_create_snapshot_same_tenant(self):
    """Verify a fired scheduled execution queues behind a snapshot.

    A snapshot execution is pinned in the 'started' state; when the
    scheduled execution fires it must transition to 'queued' and only
    run once the snapshot execution terminates.
    """
    deployment = self.deploy(resource('dsl/sleep_workflows.yaml'),
                             wait=False, client=self.client)
    deployment_id = deployment.id
    do_retries(verify_deployment_env_created, 30,
               container_id=self.env.container_id,
               deployment_id=deployment_id,
               client=self.client)
    # Pin the snapshot execution in the 'started' state
    snapshot_execution = self._create_snapshot_and_modify_execution_status(
        Execution.STARTED)
    fire_at = generate_scheduled_for_date()
    self.client.executions.start(deployment_id=deployment_id,
                                 workflow_id='install',
                                 schedule=fire_at)
    fired = self.wait_for_scheduled_execution_to_fire(deployment_id)
    # The snapshot is still running, so the fired execution must queue
    self._assert_execution_status(fired.id, Execution.QUEUED)
    self.client.executions.update(snapshot_execution.id,
                                  Execution.TERMINATED)
    self.wait_for_execution_to_end(fired)
    created_schedule = self.client.execution_schedules.list(
        deployment_id=deployment.id)[0]
    self.client.execution_schedules.delete(created_schedule.id,
                                           deployment_id)
def test_schedule_execution_while_execution_running_under_same_dep(self):
    """Verify scheduling is allowed while another execution runs.

    An install execution is held in the 'started' state, a second
    install is then scheduled on the same deployment, the first one is
    terminated, and the scheduled one must fire successfully.
    """
    deployment = self.deploy(resource('dsl/sleep_workflows.yaml'),
                             wait=False, client=self.client)
    deployment_id = deployment.id
    do_retries(verify_deployment_env_created, 30,
               container_id=self.env.container_id,
               deployment_id=deployment_id,
               client=self.client)
    running_execution = self.client.executions.start(
        deployment_id=deployment_id, workflow_id='install')
    self._wait_for_exec_to_end_and_modify_status(running_execution,
                                                 Execution.STARTED)
    fire_at = generate_scheduled_for_date()
    self.client.executions.start(deployment_id=deployment_id,
                                 workflow_id='install',
                                 schedule=fire_at)
    self.client.executions.update(running_execution.id,
                                  Execution.TERMINATED)
    self.wait_for_scheduled_execution_to_fire(deployment_id)
    created_schedule = self.client.execution_schedules.list(
        deployment_id=deployment.id)[0]
    self.client.execution_schedules.delete(created_schedule.id,
                                           deployment_id)
def test_two_scheduled_execution_same_tenant(self):
    """Verify two executions scheduled for the same time both fire."""
    dsl_path = resource('dsl/basic.yaml')
    deployments = [self.deploy(dsl_path, wait=False, client=self.client)
                   for _ in range(2)]
    for deployment in deployments:
        do_retries(verify_deployment_env_created, 30,
                   container_id=self.env.container_id,
                   deployment_id=deployment.id,
                   client=self.client)
    # Both executions share the same scheduled time
    fire_at = generate_scheduled_for_date()
    for deployment in deployments:
        self.client.executions.start(deployment_id=deployment.id,
                                     workflow_id='install',
                                     schedule=fire_at)
    for deployment in deployments:
        self.wait_for_scheduled_execution_to_fire(deployment.id)
    # Clean up the schedules created by the scheduled starts
    for deployment in deployments:
        created_schedule = self.client.execution_schedules.list(
            deployment_id=deployment.id)[0]
        self.client.execution_schedules.delete(created_schedule.id,
                                               deployment.id)
def test_two_scheduled_execution_same_tenant(self):
    """Verify two executions scheduled for the same time both run."""
    # The container ships with an invalid api token; create a valid one
    create_api_token()
    dsl_path = resource('dsl/basic.yaml')
    first_dep = self.deploy(dsl_path, wait=False, client=self.client)
    second_dep = self.deploy(dsl_path, wait=False, client=self.client)
    do_retries(verify_deployment_env_created, 30,
               deployment_id=first_dep.id)
    do_retries(verify_deployment_env_created, 30,
               deployment_id=second_dep.id)
    # Both executions share the same scheduled time
    fire_at = generate_scheduled_for_date()
    first_exec = self.client.executions.start(deployment_id=first_dep.id,
                                              workflow_id='install',
                                              schedule=fire_at)
    second_exec = self.client.executions.start(deployment_id=second_dep.id,
                                               workflow_id='install',
                                               schedule=fire_at)
    self._assert_execution_status(first_exec.id, Execution.SCHEDULED)
    self._assert_execution_status(second_exec.id, Execution.SCHEDULED)
    time.sleep(62)  # give the scheduler time to wake both executions up
    self.wait_for_execution_to_end(first_exec)
    self.wait_for_execution_to_end(second_exec)
def test_schedule_execution_while_execution_running_under_same_dep(self):
    """Verify scheduling is allowed while another execution runs.

    An install execution is held in the 'started' state; a second
    install is then scheduled on the same deployment, the running one
    is terminated, and the scheduled one must run to completion.
    """
    # The container's api token is invalid; mint a fresh one first
    create_api_token()
    deployment = self.deploy(resource('dsl/sleep_workflows.yaml'),
                             wait=False, client=self.client)
    deployment_id = deployment.id
    do_retries(verify_deployment_env_created, 30,
               deployment_id=deployment_id)
    running = self.client.executions.start(deployment_id=deployment_id,
                                           workflow_id='install')
    self._wait_for_exec_to_end_and_modify_status(running,
                                                 Execution.STARTED)
    fire_at = generate_scheduled_for_date()
    scheduled = self.client.executions.start(deployment_id=deployment_id,
                                             workflow_id='install',
                                             schedule=fire_at)
    self._assert_execution_status(scheduled.id, Execution.SCHEDULED)
    self.client.executions.update(running.id, Execution.TERMINATED)
    time.sleep(62)  # give the scheduler time to wake the execution up
    self.wait_for_execution_to_end(scheduled)
def test_two_scheduled_execution_same_tenant(self):
    """Verify two executions scheduled for the same time both run."""
    # Replace the container's invalid api token with a valid one
    create_api_token()
    dsl_path = resource('dsl/basic.yaml')
    deployments = [self.deploy(dsl_path, wait=False, client=self.client)
                   for _ in range(2)]
    for dep in deployments:
        do_retries(verify_deployment_env_created, 30,
                   deployment_id=dep.id)
    # Both executions share the same scheduled time
    fire_at = generate_scheduled_for_date()
    executions = [
        self.client.executions.start(deployment_id=dep.id,
                                     workflow_id='install',
                                     schedule=fire_at)
        for dep in deployments
    ]
    for execution in executions:
        self._assert_execution_status(execution.id, Execution.SCHEDULED)
    time.sleep(62)  # allow the scheduler to wake both executions up
    for execution in executions:
        self.wait_for_execution_to_end(execution)
def test_schedule_execution_and_create_snapshot_same_tenant(self):
    """Verify a waking scheduled execution queues behind a snapshot.

    The execution is scheduled first, then a snapshot is started and
    held in 'started'. When the execution wakes it must become
    'queued' and run only after the snapshot terminates.
    """
    # Replace the container's invalid api token with a valid one
    create_api_token()
    deployment = self.deploy(resource('dsl/sleep_workflows.yaml'),
                             wait=False, client=self.client)
    deployment_id = deployment.id
    do_retries(verify_deployment_env_created, 30,
               deployment_id=deployment_id)
    fire_at = generate_scheduled_for_date()
    scheduled = self.client.executions.start(deployment_id=deployment_id,
                                             workflow_id='install',
                                             schedule=fire_at)
    self._assert_execution_status(scheduled.id, Execution.SCHEDULED)
    # Hold the snapshot execution in the 'started' state
    snapshot_execution = self._create_snapshot_and_modify_execution_status(
        Execution.STARTED)
    time.sleep(62)  # give the scheduler time to wake the execution up
    # Snapshot still running -> the awakened execution must queue
    self._assert_execution_status(scheduled.id, Execution.QUEUED)
    self.client.executions.update(snapshot_execution.id,
                                  Execution.TERMINATED)
    self.wait_for_execution_to_end(scheduled)
def test_schedule_execution_while_execution_running_under_same_dep(self):
    """Verify an execution can be scheduled while another is running.

    The running execution is terminated before the scheduled one wakes
    up, so the scheduled execution must complete normally.
    """
    # The api token baked into the container is invalid; make a new one
    create_api_token()
    dsl_path = resource('dsl/sleep_workflows.yaml')
    dep = self.deploy(dsl_path, wait=False, client=self.client)
    do_retries(verify_deployment_env_created, 30, deployment_id=dep.id)
    active_exec = self.client.executions.start(deployment_id=dep.id,
                                               workflow_id='install')
    self._wait_for_exec_to_end_and_modify_status(active_exec,
                                                 Execution.STARTED)
    when = generate_scheduled_for_date()
    pending_exec = self.client.executions.start(deployment_id=dep.id,
                                                workflow_id='install',
                                                schedule=when)
    self._assert_execution_status(pending_exec.id, Execution.SCHEDULED)
    self.client.executions.update(active_exec.id, Execution.TERMINATED)
    time.sleep(62)  # wait for the scheduled execution to 'wake up'
    self.wait_for_execution_to_end(pending_exec)
def test_deployment_statuses_for_scheduled_execution(self):
    """Deployment status fields around a scheduled install.

    Before the schedule fires the deployment is expected to be
    inactive and requiring attention; after the scheduled install
    completes it must be active and in good standing.
    """
    dsl_path = resource("dsl/basic.yaml")
    deployment = self.deploy(dsl_path)
    # Re-fetch to get the status fields populated after deploy
    deployment = self.client.deployments.get(deployment.id)
    scheduled_time = generate_scheduled_for_date()
    execution = self.client.executions.start(deployment_id=deployment.id,
                                             workflow_id='install',
                                             schedule=scheduled_time)
    self.assertEqual(Execution.SCHEDULED, execution.status)
    # NOTE(review): these assert on the deployment object fetched
    # BEFORE executions.start(), so they check post-deploy state, not
    # that scheduling leaves the statuses unchanged — confirm whether
    # a re-fetch after scheduling was intended.
    self.assertEqual(deployment.latest_execution_status,
                     DeploymentState.COMPLETED)
    self.assertEqual(deployment.installation_status,
                     DeploymentState.INACTIVE)
    self.assertEqual(deployment.deployment_status,
                     DeploymentState.REQUIRE_ATTENTION)
    # Wait for exec to 'wake up'
    execution = self.wait_for_scheduled_execution_to_fire(deployment.id)
    self.wait_for_execution_to_end(execution)
    # Re-fetch: the install has run, so statuses should now be healthy
    deployment = self.client.deployments.get(deployment.id)
    self.assertEqual(
        deployment.latest_execution_status,
        DeploymentState.COMPLETED,
    )
    self.assertEqual(
        deployment.installation_status,
        DeploymentState.ACTIVE,
    )
    self.assertEqual(
        deployment.deployment_status,
        DeploymentState.GOOD,
    )
def test_schedule_execution_and_create_snapshot_same_tenant(self):
    """Verify a waking scheduled execution queues behind a snapshot.

    The snapshot is started (and held in 'started') before the
    execution is scheduled; when the execution wakes it must become
    'queued' and run only after the snapshot terminates.
    """
    # The container ships with an invalid api token; create a valid one
    create_api_token()
    deployment = self.deploy(resource('dsl/sleep_workflows.yaml'),
                             wait=False, client=self.client)
    deployment_id = deployment.id
    do_retries(verify_deployment_env_created, 30,
               deployment_id=deployment_id)
    # Hold the snapshot execution in the 'started' state
    snapshot_execution = self._create_snapshot_and_modify_execution_status(
        Execution.STARTED)
    fire_at = generate_scheduled_for_date()
    scheduled = self.client.executions.start(deployment_id=deployment_id,
                                             workflow_id='install',
                                             schedule=fire_at)
    self._assert_execution_status(scheduled.id, Execution.SCHEDULED)
    time.sleep(62)  # give the scheduler time to wake the execution up
    # Snapshot still running -> the awakened execution must queue
    self._assert_execution_status(scheduled.id, Execution.QUEUED)
    self.client.executions.update(snapshot_execution.id,
                                  Execution.TERMINATED)
    self.wait_for_execution_to_end(scheduled)
def test_scheduled_execution(self):
    """A scheduled execution starts as SCHEDULED and runs at its time."""
    # The token in the container is invalid, create new valid one
    create_api_token()
    dsl_path = resource('dsl/basic.yaml')
    dep = self.deploy(dsl_path, wait=False, client=self.client)
    dep_id = dep.id
    do_retries(verify_deployment_env_created, 30, deployment_id=dep_id)
    scheduled_time = generate_scheduled_for_date()
    execution = self.client.executions.start(deployment_id=dep_id,
                                             workflow_id='install',
                                             schedule=scheduled_time)
    # assertEqual: assertEquals is a deprecated alias (removed in 3.12)
    self.assertEqual(Execution.SCHEDULED, execution.status)
    time.sleep(62)  # Wait for exec to 'wake up'
    self.wait_for_execution_to_end(execution)
def test_scheduled_execution(self):
    """A scheduled execution starts as SCHEDULED and runs at its time."""
    # The token in the container is invalid, create new valid one
    create_api_token()
    dsl_path = resource('dsl/basic.yaml')
    dep = self.deploy(dsl_path, wait=False, client=self.client)
    dep_id = dep.id
    do_retries(verify_deployment_env_created, 30, deployment_id=dep_id)
    scheduled_time = generate_scheduled_for_date()
    execution = self.client.executions.start(deployment_id=dep_id,
                                             workflow_id='install',
                                             schedule=scheduled_time)
    # assertEqual: assertEquals is a deprecated alias (removed in 3.12)
    self.assertEqual(Execution.SCHEDULED, execution.status)
    time.sleep(62)  # Wait for exec to 'wake up'
    self.wait_for_execution_to_end(execution)
def test_scheduled_execution(self):
    """Scheduling creates an execution schedule that fires on time."""
    deployment = self.deploy(resource('dsl/basic.yaml'),
                             wait=False, client=self.client)
    deployment_id = deployment.id
    do_retries(verify_deployment_env_created, 30,
               container_id=self.env.container_id,
               deployment_id=deployment_id,
               client=self.client)
    fire_at = generate_scheduled_for_date()
    self.client.executions.start(deployment_id=deployment_id,
                                 workflow_id='install',
                                 schedule=fire_at)
    # Starting with a schedule implicitly creates an execution schedule
    created_schedule = self.client.execution_schedules.list(
        deployment_id=deployment.id)[0]
    self.assertEqual(created_schedule.workflow_id, 'install')
    self.assertIn('install_', created_schedule.id)
    self.wait_for_scheduled_execution_to_fire(deployment_id)
    self.client.execution_schedules.delete(created_schedule.id,
                                           deployment_id)
def test_cascading_scheduled_workflow_execution(self):
    """Scheduling a workflow on a deployment with a component cascades.

    Both the root deployment and its component deployment must get a
    SCHEDULED 'nothing_workflow' execution.
    """
    basic_blueprint_path = self.make_yaml_file(
        self.component_blueprint_with_nothing_workflow)
    self.client.blueprints.upload(basic_blueprint_path,
                                  entity_id='workflow')
    deployment_id = 'd{0}'.format(uuid.uuid4())
    main_blueprint = self.generate_root_blueprint_with_component()
    main_blueprint_path = self.make_yaml_file(main_blueprint)
    self.deploy_application(main_blueprint_path,
                            deployment_id=deployment_id)
    scheduled_time = generate_scheduled_for_date()
    self.client.executions.start(deployment_id, 'nothing_workflow',
                                 schedule=scheduled_time)
    executions = self.client.executions.list(
        workflow_id='nothing_workflow')
    # Check the count first so a missing cascade fails informatively
    self.assertEqual(len(executions), 2)
    for execution in executions:
        # assertEqual: assertEquals is a deprecated alias (removed in
        # Python 3.12)
        self.assertEqual(Execution.SCHEDULED, execution.status)
def test_cascading_scheduled_workflow_execution(self):
    """Scheduling a workflow on a deployment with a component cascades.

    Both the root deployment and its component deployment must get a
    SCHEDULED 'nothing_workflow' execution.
    """
    basic_blueprint_path = self.make_yaml_file(
        self.component_blueprint_with_nothing_workflow)
    self.client.blueprints.upload(basic_blueprint_path,
                                  entity_id='workflow')
    deployment_id = 'd{0}'.format(uuid.uuid4())
    main_blueprint = self.generate_root_blueprint_with_component()
    main_blueprint_path = self.make_yaml_file(main_blueprint)
    self.deploy_application(main_blueprint_path,
                            deployment_id=deployment_id)
    scheduled_time = generate_scheduled_for_date()
    self.client.executions.start(deployment_id, 'nothing_workflow',
                                 schedule=scheduled_time)
    executions = self.client.executions.list(
        workflow_id='nothing_workflow')
    # Check the count first so a missing cascade fails informatively
    self.assertEqual(len(executions), 2)
    for execution in executions:
        # assertEqual: assertEquals is a deprecated alias (removed in
        # Python 3.12)
        self.assertEqual(Execution.SCHEDULED, execution.status)
def test_schedule_execution_snapshot_running_multi_tenant(self):
    """Queued-behind-snapshot behavior crosses tenant boundaries.

    A snapshot runs (held in 'started') in default_tenant while an
    execution is scheduled in tenant_0; when the schedule fires, the
    execution must become 'queued' and only run after the snapshot
    terminates.
    """
    # The token in the container is invalid, create new valid one
    create_api_token()
    create_tenants_and_add_users(client=self.client, num_of_tenants=1)
    tenant_client = utils.create_rest_client(username='******',
                                             password='******',
                                             tenant='tenant_0')
    deployment = self.deploy(resource('dsl/sleep_workflows.yaml'),
                             wait=False, client=tenant_client)
    deployment_id = deployment.id
    time.sleep(2)
    # default_tenant: pin the snapshot execution in 'started'
    snapshot_execution = self._create_snapshot_and_modify_execution_status(
        Execution.STARTED)
    # tenant_0: schedule an execution for one minute from now
    fire_at = generate_scheduled_for_date()
    scheduled = tenant_client.executions.start(
        deployment_id=deployment_id,
        workflow_id='install',
        schedule=fire_at)
    self._assert_execution_status(scheduled.id, Execution.SCHEDULED,
                                  tenant_client)
    time.sleep(62)  # Wait for exec to 'wake up'
    # Snapshot in the other tenant still running -> must queue
    self._assert_execution_status(scheduled.id, Execution.QUEUED,
                                  tenant_client)
    self.client.executions.update(snapshot_execution.id,
                                  Execution.TERMINATED)
    self.wait_for_execution_to_end(scheduled, client=tenant_client)
def test_schedule_execution_snapshot_running_multi_tenant(self):
    """A system-wide snapshot blocks scheduled executions tenant-wide.

    default_tenant runs a snapshot held in 'started'; tenant_0 has an
    execution scheduled. On waking, the tenant_0 execution must be
    'queued' and start only after the snapshot terminates.
    """
    # The api token baked into the container is invalid; make a new one
    create_api_token()
    create_tenants_and_add_users(client=self.client, num_of_tenants=1)
    other_tenant = utils.create_rest_client(username='******',
                                            password='******',
                                            tenant='tenant_0')
    dsl_path = resource('dsl/sleep_workflows.yaml')
    dep = self.deploy(dsl_path, wait=False, client=other_tenant)
    time.sleep(2)
    # default_tenant: hold the snapshot execution in 'started'
    snap = self._create_snapshot_and_modify_execution_status(
        Execution.STARTED)
    # tenant_0: schedule an install for 1 min in the future
    when = generate_scheduled_for_date()
    pending = other_tenant.executions.start(deployment_id=dep.id,
                                            workflow_id='install',
                                            schedule=when)
    self._assert_execution_status(pending.id, Execution.SCHEDULED,
                                  other_tenant)
    time.sleep(62)  # wait for the scheduled execution to 'wake up'
    # The cross-tenant snapshot is still running -> must queue
    self._assert_execution_status(pending.id, Execution.QUEUED,
                                  other_tenant)
    self.client.executions.update(snap.id, Execution.TERMINATED)
    self.wait_for_execution_to_end(pending, client=other_tenant)