示例#1
0
def build_persistent_event_tasks(instance):
    """Build one local task per persistent resource of *instance*.

    Reads the '_a4c_persistent_resources' node property (a mapping of
    cloudify attribute name -> alien attribute name) and, for each entry,
    creates a local task that sends an 'a4c_persistent_event' carrying the
    pair back to Alien4Cloud.

    :param instance: node instance whose persistent resources are reported
    :return: list of local tasks, or None when the property is absent
    """
    persistent_property = instance.node.properties.get(
        '_a4c_persistent_resources', None)
    # Use identity comparison for None (PEP 8), not '!= None'.
    if persistent_property is None:
        return None
    # send event to send resource id to alien
    tasks = []

    @task_config(send_task_events=False)
    def send_event_task(message):
        _send_event(instance, 'workflow_node', 'a4c_persistent_event',
                    message, None, None, None)

    # .items() works on both Python 2 and 3; iteritems() is Python 2 only.
    for cloudify_attribute, alien_attribute in persistent_property.items():
        kwargs = {
            'message':
            build_pre_event(
                PersistentResourceEvent(cloudify_attribute,
                                        alien_attribute))
        }
        tasks.append(
            instance.ctx.local_task(local_task=send_event_task,
                                    node=instance,
                                    info=kwargs.get('message', ''),
                                    kwargs=kwargs))
    return tasks
def a4c_uninstall(**kwargs):
    """Run the 'uninstall' workflow over every node instance of the deployment."""
    task_graph = ctx.graph_mode()
    all_nodes = _get_all_nodes(ctx)
    all_instances = _get_all_nodes_instances(ctx)
    context = CustomContext(ctx, all_instances, all_nodes)
    # Announce the workflow start to Alien4Cloud before building the graph.
    ctx.internal.send_workflow_event(
        event_type='a4c_workflow_started',
        message=build_pre_event(WfStartEvent('uninstall')))
    _a4c_uninstall(ctx, task_graph, context)
    return task_graph.execute()
def build_wf_relationship_event_task(rel_instance, step_id, stage, operation_name):
    """Create a local task that emits a workflow relationship step event."""
    message = build_pre_event(
        WfRelationshipStepEvent(stage, step_id, operation_name,
                                rel_instance.target_node_instance.node_id,
                                rel_instance.target_node_instance.id))

    @task_config(send_task_events=False)
    def send_wf_step_rel_event():
        _send_event(rel_instance.node_instance, 'workflow_node',
                    'a4c_workflow_rel_step_event', message, None, None, None)

    return rel_instance.ctx.local_task(
        local_task=send_wf_step_rel_event,
        node=rel_instance.node_instance,
        info=message)
def build_wf_event_task(instance, step_id, stage, operation_name):
    """Create a local task that emits a workflow step event for *instance*."""
    message = build_pre_event(WfEvent(stage, step_id, operation_name))

    @task_config(send_task_events=False)
    def send_wf_event_task():
        _send_event(instance, 'workflow_node', 'a4c_workflow_event',
                    message, None, None, None)

    return instance.ctx.local_task(
        local_task=send_wf_event_task,
        node=instance,
        info=message)
def a4c_uninstall(**kwargs):
    """Run the 'uninstall' workflow over every node instance of the deployment.

    NOTE(review): this redefinition shadows an earlier ``a4c_uninstall`` in the
    same file; both bodies are identical.
    """
    workflow_graph = ctx.graph_mode()
    deployment_nodes = _get_all_nodes(ctx)
    deployment_instances = _get_all_nodes_instances(ctx)
    wf_context = CustomContext(ctx, deployment_instances, deployment_nodes)
    ctx.internal.send_workflow_event(
        event_type='a4c_workflow_started',
        message=build_pre_event(WfStartEvent('uninstall')))
    _a4c_uninstall(ctx, workflow_graph, wf_context)
    return workflow_graph.execute()
示例#6
0
def a4c_heal(ctx, node_instance_id, diagnose_value='Not provided', **kwargs):
    """Reinstalls the whole subgraph of the system topology

    The subgraph consists of all the nodes that are hosted in the
    failing node's compute and the compute itself.
    Additionally it unlinks and establishes appropriate relationships

    :param ctx: cloudify context
    :param node_instance_id: failing node instance's id
    :param diagnose_value: diagnosed reason of failure
    """

    ctx.logger.info("Starting 'heal' workflow on {0}, Diagnosis: {1}".format(
        node_instance_id, diagnose_value))
    failing_node = ctx.get_node_instance(node_instance_id)
    # Heal operates on the whole host: resolve the compute hosting the
    # failing instance and take its contained subgraph.
    host_instance_id = failing_node._node_instance.host_id
    failing_node_host = ctx.get_node_instance(host_instance_id)
    node_id = failing_node_host.node_id
    subgraph_node_instances = failing_node_host.get_contained_subgraph()
    added_and_related = _get_all_nodes(ctx)
    try:
        graph = ctx.graph_mode()
        ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
                                         message=build_pre_event(
                                             WfStartEvent('heal',
                                                          'uninstall')))
        custom_context = CustomContext(ctx, subgraph_node_instances,
                                       added_and_related)
        uninstall_host(ctx, graph, custom_context, node_id)
        graph.execute()
    except Exception:
        # Best effort: a failed uninstall must not abort the re-install phase.
        # Catch Exception (not bare except) so SystemExit/KeyboardInterrupt
        # still propagate.
        ctx.logger.error('Uninstall while healing failed.')
    # Reuse the workflow's task graph for the install phase, clearing any
    # leftover tasks from the (possibly failed) uninstall run.
    graph = ctx.internal.task_graph
    for task in graph.tasks_iter():
        graph.remove_task(task)
    ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
                                     message=build_pre_event(
                                         WfStartEvent('heal', 'install')))
    custom_context = CustomContext(ctx, subgraph_node_instances,
                                   added_and_related)
    install_host(ctx, graph, custom_context, node_id)
    graph.execute()
示例#7
0
def build_wf_event_task(instance, step_id, stage):
    """Create a local task that emits a workflow event for *instance*."""
    message = build_pre_event(WfEvent(stage, step_id))

    @task_config(send_task_events=False)
    def send_wf_event_task():
        _send_event(instance, 'workflow_node', 'a4c_workflow_event',
                    message, None, None, None)

    return instance.ctx.local_task(local_task=send_wf_event_task,
                                   node=instance,
                                   info=message)
def a4c_heal(
        ctx,
        node_instance_id,
        diagnose_value='Not provided',
        **kwargs):
    """Reinstalls the whole subgraph of the system topology

    The subgraph consists of all the nodes that are hosted in the
    failing node's compute and the compute itself.
    Additionally it unlinks and establishes appropriate relationships

    :param ctx: cloudify context
    :param node_instance_id: failing node instance's id
    :param diagnose_value: diagnosed reason of failure
    """

    ctx.logger.info("Starting 'heal' workflow on {0}, Diagnosis: {1}"
                    .format(node_instance_id, diagnose_value))
    failing_node = ctx.get_node_instance(node_instance_id)
    # Heal operates on the whole host: resolve the compute hosting the
    # failing instance and take its contained subgraph.
    host_instance_id = failing_node._node_instance.host_id
    failing_node_host = ctx.get_node_instance(host_instance_id)
    node_id = failing_node_host.node_id
    subgraph_node_instances = failing_node_host.get_contained_subgraph()
    added_and_related = _get_all_nodes(ctx)
    try:
        graph = ctx.graph_mode()
        ctx.internal.send_workflow_event(
            event_type='a4c_workflow_started',
            message=build_pre_event(WfStartEvent('heal', 'uninstall')))
        custom_context = CustomContext(ctx, subgraph_node_instances,
                                       added_and_related)
        uninstall_host(ctx, graph, custom_context, node_id)
        graph.execute()
    except Exception:
        # Best effort: a failed uninstall must not abort the re-install phase.
        # Catch Exception (not bare except) so SystemExit/KeyboardInterrupt
        # still propagate.
        ctx.logger.error('Uninstall while healing failed.')
    # Reuse the workflow's task graph for the install phase, clearing any
    # leftover tasks from the (possibly failed) uninstall run.
    graph = ctx.internal.task_graph
    for task in graph.tasks_iter():
        graph.remove_task(task)
    ctx.internal.send_workflow_event(
        event_type='a4c_workflow_started',
        message=build_pre_event(WfStartEvent('heal', 'install')))
    custom_context = CustomContext(ctx, subgraph_node_instances,
                                   added_and_related)
    install_host(ctx, graph, custom_context, node_id)
    graph.execute()
def build_persistent_event_task(instance):
    """Build a local task reporting the persistent resource id of *instance*.

    Reads the '_a4c_persistent_resource_id' node property, expected as a
    'cloudify_attribute=alien_attribute' string, and creates a local task
    that sends an 'a4c_persistent_event' carrying the pair back to
    Alien4Cloud.

    :param instance: node instance whose persistent resource id is reported
    :return: the local task, or None when the property is absent
    """
    persistent_property = instance.node.properties.get(
        '_a4c_persistent_resource_id', None)
    # Use identity comparison for None (PEP 8), not '!= None'.
    if persistent_property is None:
        return None
    # send event to send resource id to alien
    # Split only on the first '=' so alien attribute values that themselves
    # contain '=' are preserved intact (split('=')[1] would truncate them).
    splitted_persistent_property = persistent_property.split('=', 1)
    persistent_cloudify_attribute = splitted_persistent_property[0]
    persistent_alien_attribute = splitted_persistent_property[1]
    persist_msg = build_pre_event(
        PersistentResourceEvent(persistent_cloudify_attribute,
                                persistent_alien_attribute))

    @task_config(send_task_events=False)
    def send_event_task():
        _send_event(instance, 'workflow_node', 'a4c_persistent_event',
                    persist_msg, None, None, None)

    return instance.ctx.local_task(local_task=send_event_task,
                                   node=instance,
                                   info=persist_msg)
def build_persistent_event_tasks(instance):
    """Build one local task per persistent resource of *instance*.

    Reads the '_a4c_persistent_resources' node property (a mapping of
    cloudify attribute name -> alien attribute name) and, for each entry,
    creates a local task that sends an 'a4c_persistent_event' carrying the
    pair back to Alien4Cloud.

    :param instance: node instance whose persistent resources are reported
    :return: list of local tasks, or None when the property is absent
    """
    persistent_property = instance.node.properties.get(
        '_a4c_persistent_resources', None)
    # Use identity comparison for None (PEP 8), not '!= None'.
    if persistent_property is None:
        return None
    # send event to send resource id to alien
    tasks = []

    @task_config(send_task_events=False)
    def send_event_task(message):
        _send_event(instance, 'workflow_node', 'a4c_persistent_event',
                    message, None, None, None)

    # .items() works on both Python 2 and 3; iteritems() is Python 2 only.
    for cloudify_attribute, alien_attribute in persistent_property.items():
        kwargs = {
            'message': build_pre_event(
                PersistentResourceEvent(cloudify_attribute,
                                        alien_attribute))
        }
        tasks.append(
            instance.ctx.local_task(local_task=send_event_task,
                                    node=instance,
                                    info=kwargs.get('message', ''),
                                    kwargs=kwargs))
    return tasks
def a4c_scale(ctx, node_id, delta, scale_compute, **kwargs):
    """Scale the host node *node_id* by *delta* instances.

    Starts a deployment modification, installs the added instances
    (delta > 0) or uninstalls the removed ones (delta < 0), then finishes
    the modification; on failure the modification is rolled back.

    :param ctx: cloudify workflow context
    :param node_id: id of the host node to scale
    :param delta: number of instances to add (positive) or remove (negative)
    :param scale_compute: accepted for interface compatibility (unused here)
    :raises ValueError: if the node is missing, not a host, or the resulting
        instance count would drop below 1
    """
    scaled_node = ctx.get_node(node_id)
    if not scaled_node:
        raise ValueError("Node {0} doesn't exist".format(node_id))
    if not is_host_node(scaled_node):
        raise ValueError("Node {0} is not a host. This workflow can only scale hosts".format(node_id))
    if delta == 0:
        ctx.logger.info('delta parameter is 0, so no scaling will take place.')
        return

    curr_num_instances = scaled_node.number_of_instances
    planned_num_instances = curr_num_instances + delta
    if planned_num_instances < 1:
        raise ValueError('Provided delta: {0} is illegal. current number of'
                         'instances of node {1} is {2}'
                         .format(delta, node_id, curr_num_instances))

    modification = ctx.deployment.start_modification({
        scaled_node.id: {
            'instances': planned_num_instances
        }
    })
    ctx.logger.info(
        'Deployment modification started. [modification_id={0} : {1}]'.format(modification.id, dir(modification)))
    # Catch Exception (not bare except) throughout so SystemExit and
    # KeyboardInterrupt still propagate; every handler re-raises after
    # logging, except the nested uninstall-on-failure best-effort.
    try:
        if delta > 0:
            ctx.logger.info('Scaling host {0} adding {1} instances'.format(node_id, delta))
            added_and_related = _get_all_nodes(modification.added)
            added = _get_all_modified_node_instances(added_and_related, 'added')
            graph = ctx.graph_mode()
            ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
                                             message=build_pre_event(WfStartEvent('scale', 'install')))
            custom_context = CustomContext(ctx, added, added_and_related)
            install_host(ctx, graph, custom_context, node_id)
            try:
                graph.execute()
            except Exception:
                # Install failed: best-effort uninstall of the partially
                # added instances before propagating the failure.
                ctx.logger.error('Scale failed. Uninstalling node {0}'.format(node_id))
                graph = ctx.internal.task_graph
                for task in graph.tasks_iter():
                    graph.remove_task(task)
                try:
                    custom_context = CustomContext(ctx, added, added_and_related)
                    uninstall_host(ctx, graph, custom_context, node_id)
                    graph.execute()
                except Exception:
                    ctx.logger.error('Node {0} uninstallation following scale failure has failed'.format(node_id))
                raise
        else:
            ctx.logger.info('Unscaling host {0} removing {1} instances'.format(node_id, delta))
            removed_and_related = _get_all_nodes(modification.removed)
            removed = _get_all_modified_node_instances(removed_and_related, 'removed')
            graph = ctx.graph_mode()
            ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
                                             message=build_pre_event(WfStartEvent('scale', 'uninstall')))
            custom_context = CustomContext(ctx, removed, removed_and_related)
            uninstall_host(ctx, graph, custom_context, node_id)
            try:
                graph.execute()
            except Exception:
                ctx.logger.error('Unscale failed.')
                raise
    except Exception:
        ctx.logger.warn('Rolling back deployment modification. [modification_id={0}]'.format(modification.id))
        try:
            modification.rollback()
        except Exception:
            ctx.logger.warn('Deployment modification rollback failed. The '
                            'deployment model is most likely in some corrupted'
                            ' state.'
                            '[modification_id={0}]'.format(modification.id))
            raise
        raise
    else:
        try:
            modification.finish()
        except Exception:
            ctx.logger.warn('Deployment modification finish failed. The '
                            'deployment model is most likely in some corrupted'
                            ' state.'
                            '[modification_id={0}]'.format(modification.id))
            raise
示例#12
0
def a4c_scale(ctx, node_id, delta, scale_compute, **kwargs):
    """Scale the host node *node_id* by *delta* instances.

    Starts a deployment modification, installs the added instances
    (delta > 0) or uninstalls the removed ones (delta < 0), then finishes
    the modification; on failure the modification is rolled back.

    :param ctx: cloudify workflow context
    :param node_id: id of the host node to scale
    :param delta: number of instances to add (positive) or remove (negative)
    :param scale_compute: accepted for interface compatibility (unused here)
    :raises ValueError: if the node is missing, not a host, or the resulting
        instance count would drop below 1
    """
    scaled_node = ctx.get_node(node_id)
    if not scaled_node:
        raise ValueError("Node {0} doesn't exist".format(node_id))
    if not is_host_node(scaled_node):
        raise ValueError(
            "Node {0} is not a host. This workflow can only scale hosts".
            format(node_id))
    if delta == 0:
        ctx.logger.info('delta parameter is 0, so no scaling will take place.')
        return

    curr_num_instances = scaled_node.number_of_instances
    planned_num_instances = curr_num_instances + delta
    if planned_num_instances < 1:
        raise ValueError('Provided delta: {0} is illegal. current number of'
                         'instances of node {1} is {2}'.format(
                             delta, node_id, curr_num_instances))

    modification = ctx.deployment.start_modification(
        {scaled_node.id: {
            'instances': planned_num_instances
        }})
    ctx.logger.info(
        'Deployment modification started. [modification_id={0} : {1}]'.format(
            modification.id, dir(modification)))
    # Catch Exception (not bare except) throughout so SystemExit and
    # KeyboardInterrupt still propagate; every handler re-raises after
    # logging, except the nested uninstall-on-failure best-effort.
    try:
        if delta > 0:
            ctx.logger.info('Scaling host {0} adding {1} instances'.format(
                node_id, delta))
            added_and_related = _get_all_nodes(modification.added)
            added = _get_all_modified_node_instances(added_and_related,
                                                     'added')
            graph = ctx.graph_mode()
            ctx.internal.send_workflow_event(
                event_type='a4c_workflow_started',
                message=build_pre_event(WfStartEvent('scale', 'install')))
            custom_context = CustomContext(ctx, added, added_and_related)
            install_host(ctx, graph, custom_context, node_id)
            try:
                graph.execute()
            except Exception:
                # Install failed: best-effort uninstall of the partially
                # added instances before propagating the failure.
                ctx.logger.error(
                    'Scale failed. Uninstalling node {0}'.format(node_id))
                graph = ctx.internal.task_graph
                for task in graph.tasks_iter():
                    graph.remove_task(task)
                try:
                    custom_context = CustomContext(ctx, added,
                                                   added_and_related)
                    uninstall_host(ctx, graph, custom_context, node_id)
                    graph.execute()
                except Exception:
                    ctx.logger.error(
                        'Node {0} uninstallation following scale failure has failed'
                        .format(node_id))
                raise
        else:
            ctx.logger.info('Unscaling host {0} removing {1} instances'.format(
                node_id, delta))
            removed_and_related = _get_all_nodes(modification.removed)
            removed = _get_all_modified_node_instances(removed_and_related,
                                                       'removed')
            graph = ctx.graph_mode()
            ctx.internal.send_workflow_event(
                event_type='a4c_workflow_started',
                message=build_pre_event(WfStartEvent('scale', 'uninstall')))
            custom_context = CustomContext(ctx, removed, removed_and_related)
            uninstall_host(ctx, graph, custom_context, node_id)
            try:
                graph.execute()
            except Exception:
                ctx.logger.error('Unscale failed.')
                raise
    except Exception:
        ctx.logger.warn(
            'Rolling back deployment modification. [modification_id={0}]'.
            format(modification.id))
        try:
            modification.rollback()
        except Exception:
            ctx.logger.warn('Deployment modification rollback failed. The '
                            'deployment model is most likely in some corrupted'
                            ' state.'
                            '[modification_id={0}]'.format(modification.id))
            raise
        raise
    else:
        try:
            modification.finish()
        except Exception:
            ctx.logger.warn('Deployment modification finish failed. The '
                            'deployment model is most likely in some corrupted'
                            ' state.'
                            '[modification_id={0}]'.format(modification.id))
            raise
示例#13
0
def operation_task_for_instance(ctx, graph, node_id, instance, operation_fqname, step_id, custom_context):
    """Build the task sequence executing *operation_fqname* on *instance*.

    Wraps the lifecycle operation between an 'in' and an 'ok' workflow event
    and, depending on the operation, interleaves relationship operations
    (establish / preconfigure / postconfigure / unlink), monitoring start,
    and host pre/post hooks.

    :param ctx: cloudify workflow context
    :param graph: task graph the sequence is built on
    :param node_id: id of the node being processed
    :param instance: node instance the operation runs on
    :param operation_fqname: fully-qualified lifecycle operation name
    :param step_id: workflow step id used in the emitted events
    :param custom_context: carries relationship_targets (instance.id -> set
        of relationships where this instance is the target)
    :return: the TaskSequenceWrapper containing all scheduled tasks
    """
    sequence = TaskSequenceWrapper(graph)
    # Signal step entry before any work happens.
    msg = build_wf_event(WfEvent(instance.id, "in", step_id))
    sequence.add(instance.send_event(msg))
    relationship_count = count_relationships(instance)
    if operation_fqname == 'cloudify.interfaces.lifecycle.start':
        sequence.add(instance.execute_operation(operation_fqname))
        if _is_host_node(instance):
            sequence.add(*host_post_start(ctx, instance))
        # After start: kick off monitoring and establish all relationships
        # (source and target sides) in parallel.
        fork = ForkjoinWrapper(graph)
        fork.add(instance.execute_operation('cloudify.interfaces.monitoring.start'))
        if relationship_count > 0:
            for relationship in instance.relationships:
                fork.add(relationship.execute_source_operation('cloudify.interfaces.relationship_lifecycle.establish'))
                fork.add(relationship.execute_target_operation('cloudify.interfaces.relationship_lifecycle.establish'))
        sequence.add(
            instance.send_event("Start monitoring on node '{0}' instance '{1}'".format(node_id, instance.id)),
            forkjoin_sequence(graph, fork, instance, "establish")
        )
    elif operation_fqname == 'cloudify.interfaces.lifecycle.configure':
        # Relationships where this instance is the TARGET (computed by the
        # custom context); instance.relationships only lists source-side ones.
        as_target_relationships = custom_context.relationship_targets.get(instance.id, set())
        if relationship_count > 0 or len(as_target_relationships) > 0:
            preconfigure_tasks = ForkjoinWrapper(graph)
            for relationship in instance.relationships:
                preconfigure_tasks.add(relationship.execute_source_operation('cloudify.interfaces.relationship_lifecycle.preconfigure'))
            for relationship in as_target_relationships:
                preconfigure_tasks.add(relationship.execute_target_operation('cloudify.interfaces.relationship_lifecycle.preconfigure'))
            sequence.add(forkjoin_sequence(graph, preconfigure_tasks, instance, "preconf for {0}".format(instance.id)))
        sequence.add(instance.execute_operation(operation_fqname))
        if relationship_count > 0 or len(as_target_relationships) > 0:
            postconfigure_tasks = ForkjoinWrapper(graph)
            for relationship in instance.relationships:
                postconfigure_tasks.add(relationship.execute_source_operation('cloudify.interfaces.relationship_lifecycle.postconfigure'))
            for relationship in as_target_relationships:
                task = relationship.execute_target_operation('cloudify.interfaces.relationship_lifecycle.postconfigure')
                # Target-side postconfigure failures are downgraded to events.
                _set_send_node_event_on_error_handler(task, instance, "Error occurred while postconfiguring node as target for relationship {0} - ignoring...".format(relationship))
                postconfigure_tasks.add(task)
            msg = "postconf for {0}".format(instance.id)
            sequence.add(forkjoin_sequence(graph, postconfigure_tasks, instance, msg))
        persistent_property = instance.node.properties.get('_a4c_persistent_resource_id', None)
        if persistent_property != None:
            # send event to send resource id to alien
            # Property format is 'cloudify_attribute=alien_attribute'.
            splitted_persistent_property = persistent_property.split('=')
            persistent_cloudify_attribute = splitted_persistent_property[0]
            persistent_alien_attribute = splitted_persistent_property[1]

            persist_msg = build_pre_event(PersistentResourceEvent(persistent_cloudify_attribute, persistent_alien_attribute))

            @task_config(send_task_events=False)
            def send_event_task():
                _send_event(instance, 'workflow_node', 'a4c_persistent_event', persist_msg, None, None, None)
            sequence.add(instance.ctx.local_task(
                local_task=send_event_task,
                node=instance,
                info=persist_msg))
    elif operation_fqname == 'cloudify.interfaces.lifecycle.stop':
        if _is_host_node(instance):
            sequence.add(*host_pre_stop(instance))
        task = instance.execute_operation(operation_fqname)
        # Stop failures are downgraded to events so teardown can continue.
        _set_send_node_event_on_error_handler(task, instance, "Error occurred while stopping node - ignoring...")
        sequence.add(task)
        # now call unlink onto relations' target
        if relationship_count > 0:
            fork = ForkjoinWrapper(graph)
            for relationship in instance.relationships:
                unlink_task_source = relationship.execute_source_operation('cloudify.interfaces.relationship_lifecycle.unlink')
                _set_send_node_event_on_error_handler(unlink_task_source, instance, "Error occurred while unlinking node from target {0} - ignoring...".format(relationship.target_id))
                fork.add(unlink_task_source)
                unlink_task_target = relationship.execute_target_operation('cloudify.interfaces.relationship_lifecycle.unlink')
                _set_send_node_event_on_error_handler(unlink_task_target, instance, "Error occurred while unlinking node from target {0} - ignoring...".format(relationship.target_id))
                fork.add(unlink_task_target)
            sequence.add(forkjoin_sequence(graph, fork, instance, "unlink"))
    elif operation_fqname == 'cloudify.interfaces.lifecycle.delete':
        task = instance.execute_operation(operation_fqname)
        # Delete failures are downgraded to events so teardown can continue.
        _set_send_node_event_on_error_handler(task, instance, "Error occurred while deleting node - ignoring...")
        sequence.add(task)
    else:
        # the default behavior : just do the job
        sequence.add(instance.execute_operation(operation_fqname))
    # Signal step completion.
    msg = build_wf_event(WfEvent(instance.id, "ok", step_id))
    sequence.add(instance.send_event(msg))
    return sequence