Code Example #1
from cloudify.exceptions import NonRecoverableError
from cloudify.workflows import ctx


def upgrade(node_instance_id, config_set, config, config_url, config_format,
            chart_version, chart_repo_url, repo_user, repo_user_password,
            **kwargs):
    node_instance = ctx.get_node_instance(node_instance_id)

    # Fail fast if the ID does not resolve to an instance in this deployment.
    if not node_instance:
        raise NonRecoverableError(
            'No such node_instance_id in deployment: {0}.'.format(
                node_instance_id))

    # Rebuild kwargs for the operation call; any incoming **kwargs are
    # deliberately discarded.
    kwargs = {
        'config': '',
        'chart_version': str(chart_version),
        'chart_repo': str(chart_repo_url),
        'config_set': str(config_set),
        'config_json': str(config),
        'config_url': str(config_url),
        'config_format': str(config_format),
        'repo_user': str(repo_user),
        'repo_user_passwd': str(repo_user_password),
    }
    node_instance.execute_operation('upgrade', kwargs=kwargs)
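A workflow like this is started per deployment, with the chart details passed as execution parameters. As a minimal sketch, it could be triggered with the Cloudify REST client roughly as follows (the manager host, deployment ID, and all parameter values are placeholders; check the call signature against your cloudify-rest-client version):

```python
from cloudify_rest_client import CloudifyClient

client = CloudifyClient(host='cloudify-manager.example.com')  # placeholder
client.executions.start(
    deployment_id='my-helm-release',            # placeholder deployment
    workflow_id='upgrade',
    parameters={
        'node_instance_id': 'release_abc123',   # target node instance
        'config_set': '', 'config': '{}',
        'config_url': '', 'config_format': 'json',
        'chart_version': '1.2.3',
        'chart_repo_url': 'https://charts.example.com',
        'repo_user': '', 'repo_user_password': '',
    })
```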
Code Example #2
import ast

from cloudify.exceptions import NonRecoverableError
from cloudify.workflows import ctx


def update_resource_definition(node_instance_id, resource_definition_changes,
                               **kwargs):
    """
    Updates a Kubernetes Resource's resource definition.

    Example Usage:
    ```shell
    $ cfy blueprints upload \
        examples/wordpress-blueprint.yaml -b wordpress
    $ cfy deployments create --skip-plugins-validation -b wordpress
    $ cfy executions start install -d wordpress
    $ cfy node-instances list -d wordpress
    # At this point copy the node_instance_id of wordpress_svc node.
    $ cfy node-instances get [wordpress_svc node instance id]
    # At this point copy the cluster_ip in the resource definition.
    $ cfy executions start update_resource_definition -d wordpress -vv \
        -p resource_definition_changes="
        {'metadata': {'resourceVersion': '0'},
        'spec': {'clusterIP': '10.110.97.242',
        'ports': [{'port': 80, 'nodePort': 30081}]}
        }" -p node_instance_id=[wordpress_svc node instance id]
    ```

    :param node_instance_id: A string.
        The node instance ID of the node instance containing the resource.
    :param resource_definition_changes: A dictionary encoded as a unicode
        string representing the changes to the resource definition.
    """

    # `basestring` exists on Python 2 only; on Python 3, substitute `str`.
    if isinstance(resource_definition_changes, basestring):
        resource_definition_changes = \
            ast.literal_eval(resource_definition_changes)

    node_instance = ctx.get_node_instance(node_instance_id)

    # Fail fast if the ID does not resolve to an instance in this deployment.
    if not node_instance:
        raise NonRecoverableError(
            'No such node_instance_id in deployment: {0}.'.format(
                node_instance_id))

    # Execute start operation to update to
    # the latest version of the resource definition.
    node_instance.logger.info(
        'Executing start in order to get the current state.')
    execute_node_instance_operation(node_instance, RESOURCE_START_OPERATION)
    node_instance.logger.info(
        'Executed start in order to get the current state.')

    # Execute update operation to push the change to Kubernetes.
    node_instance.logger.info(
        'Executing update in order to push the new changes.')
    execute_node_instance_operation(
        node_instance,
        RESOURCE_UPDATE_OPERATION,
        _params={DEFINITION_ADDITIONS: resource_definition_changes})
    node_instance.logger.info(
        'Executed update in order to push the new changes.')
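`execute_node_instance_operation`, `RESOURCE_START_OPERATION`, `RESOURCE_UPDATE_OPERATION`, and `DEFINITION_ADDITIONS` are module-level names from the same workflows file that are not shown above. A plausible sketch of the helper, assuming it just runs one operation on one node instance through a task graph (the constant values and the `_params` handling are assumptions, not the plugin's actual definitions):

```python
RESOURCE_START_OPERATION = 'cloudify.interfaces.lifecycle.start'    # assumed
RESOURCE_UPDATE_OPERATION = 'cloudify.interfaces.lifecycle.update'  # assumed
DEFINITION_ADDITIONS = 'definition_additions'                       # assumed


def execute_node_instance_operation(node_instance, operation, _params=None):
    # Build a one-task graph that runs `operation` on the given node
    # instance, then block until it completes.
    graph = ctx.graph_mode()
    sequence = graph.sequence()
    sequence.add(
        node_instance.execute_operation(operation, kwargs=_params or {}))
    return graph.execute()
```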
Code Example #3
def a4c_heal(ctx, node_instance_id, diagnose_value='Not provided', **kwargs):
    """Reinstall the whole subgraph of the system topology.

    The subgraph consists of all the nodes that are hosted on the
    failing node's compute, plus the compute itself.
    Additionally, it unlinks and re-establishes the appropriate
    relationships.

    (`_get_all_nodes`, `CustomContext`, `build_pre_event`, `WfStartEvent`,
    `uninstall_host` and `install_host` are helpers defined elsewhere in
    the a4c workflows module.)

    :param ctx: cloudify workflow context
    :param node_instance_id: failing node instance's ID
    :param diagnose_value: diagnosed reason of failure
    """

    ctx.logger.info("Starting 'heal' workflow on {0}, Diagnosis: {1}".format(
        node_instance_id, diagnose_value))
    failing_node = ctx.get_node_instance(node_instance_id)
    host_instance_id = failing_node._node_instance.host_id
    failing_node_host = ctx.get_node_instance(host_instance_id)
    node_id = failing_node_host.node_id
    subgraph_node_instances = failing_node_host.get_contained_subgraph()
    added_and_related = _get_all_nodes(ctx)
    try:
        graph = ctx.graph_mode()
        ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
                                         message=build_pre_event(
                                             WfStartEvent('heal',
                                                          'uninstall')))
        custom_context = CustomContext(ctx, subgraph_node_instances,
                                       added_and_related)
        uninstall_host(ctx, graph, custom_context, node_id)
        graph.execute()
    except Exception:
        # Proceed to the install phase even if the uninstall failed; the
        # failing subgraph may already be partially torn down.
        ctx.logger.error('Uninstall while healing failed.')
    graph = ctx.internal.task_graph
    # Clear leftover tasks (snapshot the iterator before removing) so the
    # graph can be reused for the install phase.
    for task in list(graph.tasks_iter()):
        graph.remove_task(task)
    ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
                                     message=build_pre_event(
                                         WfStartEvent('heal', 'install')))
    custom_context = CustomContext(ctx, subgraph_node_instances,
                                   added_and_related)
    install_host(ctx, graph, custom_context, node_id)
    graph.execute()
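A heal like this is started per failing node instance. A minimal trigger sketch, analogous to the upgrade example above (deployment ID, instance ID, and diagnosis text are placeholders):

```python
from cloudify_rest_client import CloudifyClient

client = CloudifyClient(host='cloudify-manager.example.com')  # placeholder
client.executions.start(
    deployment_id='my-deployment',              # placeholder deployment
    workflow_id='a4c_heal',
    parameters={
        'node_instance_id': 'compute_abc123',   # the failing instance
        'diagnose_value': 'heartbeat timeout',
    })
```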
Code Example #4
from cloudify.exceptions import NonRecoverableError
from cloudify.workflows import ctx


def rollback(node_instance_id, revision, **kwargs):
    node_instance = ctx.get_node_instance(node_instance_id)

    # Fail fast if the ID does not resolve to an instance in this deployment.
    if not node_instance:
        raise NonRecoverableError(
            'No such node_instance_id in deployment: {0}.'.format(
                node_instance_id))

    # Rebuild kwargs for the operation call; any incoming **kwargs are
    # deliberately discarded.
    kwargs = {'revision': str(revision)}
    node_instance.execute_operation('rollback', kwargs=kwargs)
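`upgrade` (example #1) and `rollback` share the same lookup-and-validate preamble. If more operations follow this pattern, it could be factored into a small helper; a sketch, with a hypothetical name that is not part of the plugin:

```python
from cloudify.exceptions import NonRecoverableError


def get_node_instance_or_fail(ctx, node_instance_id):
    # Hypothetical helper: resolve a node instance ID or raise a
    # NonRecoverableError naming the missing ID.
    node_instance = ctx.get_node_instance(node_instance_id)
    if not node_instance:
        raise NonRecoverableError(
            'No such node_instance_id in deployment: {0}.'.format(
                node_instance_id))
    return node_instance
```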