Example No. 1
    def cancel_workflow_task(self, execution_id, rest_token, tenant,
                             execution_token, rest_host):
        logger.info('Cancelling workflow {0}'.format(execution_id))

        class CancelCloudifyContext(object):
            """A CloudifyContext that has just enough data to cancel workflows
            """
            def __init__(self):
                self.rest_host = rest_host
                self.tenant_name = tenant['name']
                self.rest_token = rest_token
                self.execution_token = execution_token
                # always bypass - this is a kill, as forceful as we can get
                self.bypass_maintenance = True

        with current_workflow_ctx.push(CancelCloudifyContext()):
            self._workflow_registry.cancel(execution_id)
            self._cancel_agent_operations(execution_id)
            try:
                update_execution_status(execution_id, Execution.CANCELLED)
            except InvalidExecutionUpdateStatus:
                # the workflow process might have cleaned up, and marked the
                # workflow failed or cancelled already
                logger.info('Failed to update execution status: {0}'.format(
                    execution_id))
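
Every example here leans on the same mechanism: `current_workflow_ctx.push(...)` acts as a context manager that makes the given workflow context the implicit current one, per thread, for the duration of the `with` block. A minimal sketch of how such a thread-local push can be implemented (illustrative only; `_ContextStack` and its internals are assumptions, not Cloudify's actual code):

    import threading
    from contextlib import contextmanager

    class _ContextStack(threading.local):
        # threading.local gives every thread its own independent stack
        def __init__(self):
            self._stack = []

        @contextmanager
        def push(self, ctx, params=None):
            # make (ctx, params) current for this thread until the
            # with-block exits; matches both the push(ctx) and
            # push(ctx, params) call shapes seen in these examples
            self._stack.append((ctx, params))
            try:
                yield ctx
            finally:
                self._stack.pop()

        def get(self):
            # the context made current by the innermost push(), if any
            return self._stack[-1][0] if self._stack else None

    current_workflow_ctx = _ContextStack()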
Example No. 2
 def test_install_empty(self):
     """No instances - no operations"""
     ctx, graph = self._make_ctx_and_graph()
     pr = LifecycleProcessor(graph)
     with current_workflow_ctx.push(ctx):
         pr.install()
     assert graph.tasks == []
Example No. 3
 def test_install_empty_instance(self):
     """Instance without interfaces - still set to started"""
     ctx, graph = self._make_ctx_and_graph()
     pr = self._make_lifecycle_processor(ctx,
                                         graph,
                                         nodes=[self._make_node()],
                                         instances=[self._make_instance()])
     with current_workflow_ctx.push(ctx):
         pr.install()
     assert any(
         task.name == 'SetNodeInstanceStateTask' and task.info == 'started'
         for task in graph.tasks)
Example No. 4
 def _inner(*args, **kwargs):
     task = args[0]
     # a cancel was requested: skip execution entirely
     if api.has_cancel_request():
         return task.async_result
     # not due yet: re-schedule this wrapper with a daemon timer and
     # return the async result immediately
     if task.execute_after and task.execute_after > time.time():
         t = threading.Timer(task.execute_after - time.time(),
                             _inner,
                             args=args,
                             kwargs=kwargs)
         t.daemon = True
         t.start()
         return task.async_result
     # execute the wrapped function with the task's workflow context current
     with current_workflow_ctx.push(task.workflow_context):
         return f(*args, **kwargs)
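
The re-scheduling trick above (return immediately and let a daemon `threading.Timer` call `_inner` again once `execute_after` passes) works standalone too. A self-contained sketch of the idiom (`run_at` is a hypothetical helper, not part of the snippet's API):

    import threading
    import time

    def run_at(when, fn, *args, **kwargs):
        # `when` is an absolute time.time() timestamp; while it is still
        # in the future, hand off to a daemon Timer and return at once
        delay = when - time.time()
        if delay > 0:
            t = threading.Timer(delay, run_at,
                                args=(when, fn) + args, kwargs=kwargs)
            t.daemon = True
            t.start()
            return
        fn(*args, **kwargs)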
Example No. 5
 def _get_and_execute_task_graph(self, token_info, deployment_id,
                                 dep_ctx, tenant, tenant_client,
                                 workflow_ctx, ctx_params,
                                 deps_with_failed_plugins,
                                 failed_deployments):
     """
     While in the correct workflow context, this method
     creates a `create_deployment_env` task graph and executes it.
     Since this method is being executed by threads, it appends errors
     to thread-safe queues, in order to handle them later outside of
     this scope.
     """
     # The workflow context is thread local so we need to push it for
     # each thread.
     try:
         with current_workflow_ctx.push(workflow_ctx, ctx_params):
             try:
                 api_token = self._get_api_token(
                     token_info[tenant][deployment_id]
                 )
                 dep = tenant_client.deployments.get(deployment_id)
                 blueprint = tenant_client.blueprints.get(
                     dep_ctx.blueprint.id,
                 )
                 tasks_graph = self._get_tasks_graph(
                     dep_ctx,
                     blueprint,
                     dep,
                     api_token,
                 )
                 with dep_ctx:
                     tasks_graph.execute()
             except RuntimeError as re:
                 if self._should_ignore_plugin_failure(str(re)):
                     ctx.logger.warning('Failed to install plugins for '
                                        'deployment `{0}` under tenant '
                                        '`{1}`.  Proceeding since '
                                        '`ignore_plugin_failure` flag was'
                                        ' used.'
                                        .format(deployment_id, tenant))
                     ctx.logger.debug(str(re))
                     deps_with_failed_plugins.put((deployment_id, tenant))
                 else:
                     failed_deployments.put((deployment_id, tenant))
                     ctx.logger.info(re)
     finally:
         self._semaphore.release()
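
The queues pay off on the caller's side: a worker never raises across the thread boundary, so the spawning code joins every thread first and only then drains the queues. A self-contained sketch of that collect-errors-later pattern (the `worker` body and inputs are hypothetical):

    import queue
    import threading

    failed = queue.Queue()

    def worker(deployment_id, tenant):
        try:
            raise RuntimeError('simulated plugin failure')
        except RuntimeError:
            # report instead of raising: an exception would die with
            # its thread
            failed.put((deployment_id, tenant))

    threads = [threading.Thread(target=worker, args=('dep-%d' % i, 'default'))
               for i in range(3)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    # only after join() is it safe to treat the queues as complete
    while not failed.empty():
        print('failed:', failed.get())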
Example No. 7
    def _create_inter_deployment_dependencies(deployments_queue,
                                              failed_deployments_queue,
                                              wf_context, context_params,
                                              update_service_composition):
        while True:
            try:
                tenant, deployment_id = deployments_queue.get_nowait()
            except queue.Empty:
                break

            with current_workflow_ctx.push(wf_context, context_params):
                try:
                    tenant_client = get_rest_client(tenant=tenant)
                    tenant_client.inter_deployment_dependencies.restore(
                        deployment_id, update_service_composition)
                except RuntimeError as err:
                    failed_deployments_queue.put((deployment_id, tenant))
                    ctx.logger.info(
                        'Failed creating inter deployment '
                        'dependencies for deployment %s from '
                        'tenant %s. %s', deployment_id, tenant, err)
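
This loop is the consumer half of a thread fan-out: the queue is filled up front, several threads each run the loop, and the `queue.Empty` raised by `get_nowait()` is the natural termination signal once work runs out. A self-contained sketch of that shape (payload and worker count are assumptions):

    import queue
    import threading

    jobs = queue.Queue()
    for i in range(10):
        jobs.put(('default_tenant', 'dep-%d' % i))

    def work():
        while True:
            try:
                tenant, deployment_id = jobs.get_nowait()
            except queue.Empty:
                break  # queue drained: this worker is done
            # ... process (tenant, deployment_id) here ...

    workers = [threading.Thread(target=work) for _ in range(4)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()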
Example No. 8
    def test_install_create_operation(self):
        """Instance with a create interface - the operation is called"""
        ctx, graph = self._make_ctx_and_graph()

        pr = self._make_lifecycle_processor(
            ctx,
            graph,
            nodes=[
                self._make_node(operations={
                    'cloudify.interfaces.lifecycle.create':
                    self._make_operation()
                },
                                plugins=[self._make_plugin()])
            ],
            instances=[self._make_instance()])
        with current_workflow_ctx.push(ctx):
            pr.install()
        assert any(task.name == 'plugin1.op1' for task in graph.tasks)
        assert any(
            task.name == 'SetNodeInstanceStateTask' and task.info == 'started'
            for task in graph.tasks)
Example No. 9
    def test_update_resumed_install(self):
        """When resuming an interrupted install, the instance is deleted first
        """
        ctx, graph = self._make_ctx_and_graph()

        node = self._make_node(operations={
            'cloudify.interfaces.lifecycle.create':
            self._make_operation(operation='plugin1.op1'),
            'cloudify.interfaces.lifecycle.delete':
            self._make_operation(operation='plugin1.op2')
        },
                               plugins=[{
                                   'name': 'plugin1',
                                   'package_name': 'plugin1'
                               }])
        instance = self._make_instance()
        pr = self._make_lifecycle_processor(ctx,
                                            graph,
                                            nodes=[node],
                                            instances=[instance])
        with current_workflow_ctx.push(ctx):
            pr.install()

        # after creating the install graph, resume it - it should first
        # delete the instance, before re-installing it
        ctx.resume = True
        instance['state'] = 'creating'
        pr._update_resumed_install(graph)

        delete_task_index = None
        install_task_index = None
        for ix, task in enumerate(graph.linearize()):
            if task.name == 'plugin1.op1':
                install_task_index = ix
            elif task.name == 'plugin1.op2':
                delete_task_index = ix

        assert install_task_index is not None
        assert delete_task_index is not None
        assert delete_task_index < install_task_index
Example No. 10
    def cancel_workflow_task(self, execution_id, rest_token, tenant,
                             execution_token):
        logger.info('Cancelling workflow {0}'.format(execution_id))

        class CancelCloudifyContext(object):
            """A CloudifyContext that has just enough data to cancel workflows
            """
            def __init__(self):
                self.tenant = tenant
                self.tenant_name = tenant['name']
                self.rest_token = rest_token
                self.execution_token = execution_token

        with current_workflow_ctx.push(CancelCloudifyContext()):
            self._workflow_registry.cancel(execution_id)
            self._cancel_agent_operations(execution_id)
            try:
                update_execution_status(execution_id, Execution.CANCELLED)
            except InvalidExecutionUpdateStatus:
                # the workflow process might have cleaned up, and marked the
                # workflow failed or cancelled already
                logger.info('Failed to update execution status: {0}'
                            .format(execution_id))
Example No. 11
    def test_update_resumed_install_dependency(self):
        """Similar to test_update_resumed_install, but with a relationship too
        """
        ctx, graph = self._make_ctx_and_graph()

        node1 = self._make_node(operations={
            'cloudify.interfaces.lifecycle.create':
            self._make_operation(operation='plugin1.n1_create'),
            'cloudify.interfaces.lifecycle.delete':
            self._make_operation(operation='plugin1.n1_delete')
        },
                                plugins=[self._make_plugin()])
        node2 = self._make_node(
            relationships=[{
                'target_id': 'node1',
                'type_hierarchy': ['cloudify.relationships.depends_on']
            }],
            id='node2',
            operations={
                'cloudify.interfaces.lifecycle.create':
                self._make_operation(operation='plugin1.n2_create'),
                'cloudify.interfaces.lifecycle.delete':
                self._make_operation(operation='plugin1.n2_delete')
            },
            plugins=[self._make_plugin()])
        ni1 = self._make_instance()
        ni2 = self._make_instance(relationships=[{
            'target_id': 'node1_1',
            'target_name': 'node1'
        }],
                                  node_id='node2',
                                  id='node2_1')
        pr = self._make_lifecycle_processor(ctx,
                                            graph,
                                            nodes=[node1, node2],
                                            instances=[ni1, ni2])
        with current_workflow_ctx.push(ctx):
            pr.install()

        # resume an install, with one node that was interrupted in creating
        ni1['state'] = 'creating'
        ctx.resume = True
        pr._update_resumed_install(graph)

        # to check that the operations happened in the desired order, examine
        # their position in the linearized graph
        task_indexes = {
            'n1_create': None,
            'n1_delete': None,
            'n2_create': None,
            'n2_delete': None,
        }
        for ix, task in enumerate(graph.linearize()):
            name = task.name.replace('plugin1.', '')
            if name in task_indexes:
                task_indexes[name] = ix

        # delete happened before create - reinstall
        assert task_indexes['n1_delete'] < task_indexes['n1_create']
        # dependency installed before the dependent
        assert task_indexes['n1_create'] < task_indexes['n2_create']

        # n2 didn't need to be deleted, because it wasn't in the creating state
        assert task_indexes['n2_delete'] is None
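
The index-collection loop used here and in test_update_resumed_install generalizes into a small helper; a hypothetical refactor (`task_order` is not part of the actual test suite):

    def task_order(graph, names):
        # map each name to its position in the linearized graph,
        # or None if the task never appears
        indexes = {name: None for name in names}
        for ix, task in enumerate(graph.linearize()):
            if task.name in indexes:
                indexes[task.name] = ix
        return indexes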