Example #1
def generic_scale(_ctx, delta, modification, graph):

    try:
        _ctx.logger.info('Deployment modification started. '
                         '[modification_id={0}]'.format(modification.id))
        if delta > 0:
            added_and_related = set(modification.added.node_instances)
            added = set(i for i in added_and_related
                        if i.modification == 'added')
            related = added_and_related - added
            try:
                lifecycle.install_node_instances(graph=graph,
                                                 node_instances=added,
                                                 related_nodes=related)
            except:
                _ctx.logger.error('Scale out failed, scaling back in.')
                for task in graph.tasks_iter():
                    graph.remove_task(task)
                lifecycle.uninstall_node_instances(graph=graph,
                                                   node_instances=added,
                                                   related_nodes=related)
                raise
        else:
            removed_and_related = set(modification.removed.node_instances)
            removed = set(i for i in removed_and_related
                          if i.modification == 'removed')
            related = removed_and_related - removed
            lifecycle.uninstall_node_instances(graph=graph,
                                               node_instances=removed,
                                               related_nodes=related)
    except:
        _ctx.logger.warn('Rolling back deployment modification. '
                         '[modification_id={0}]'.format(modification.id))
        try:
            modification.rollback()
        except:
            _ctx.logger.warn('Deployment modification rollback failed. The '
                             'deployment model is most likely in some '
                             'corrupted state. '
                             '[modification_id={0}]'.format(modification.id))
            raise
        raise
    else:
        try:
            modification.finish()
        except:
            _ctx.logger.warn('Deployment modification finish failed. The '
                             'deployment model is most likely in some '
                             'corrupted state. '
                             '[modification_id={0}]'.format(modification.id))
            raise
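
A minimal usage sketch (not part of the original source): it assumes generic_scale sits in a workflow module that does `from cloudify.plugins import lifecycle`, and shows a hypothetical `scale` workflow that starts the deployment modification itself and then delegates install/uninstall/rollback to generic_scale.

from cloudify.decorators import workflow
from cloudify.plugins import lifecycle  # used inside generic_scale


@workflow
def scale(ctx, node_id, delta, **kwargs):
    # Hypothetical caller: compute the planned instance count, start the
    # modification, then let generic_scale drive the rest.
    node = ctx.get_node(node_id)
    modification = ctx.deployment.start_modification({
        node.id: {'instances': node.number_of_instances + int(delta)}
    })
    generic_scale(ctx, int(delta), modification, ctx.graph_mode())
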
Example #2
        def _install_nodes():
            if skip_install:
                return
            # Adding nodes or node instances should be based on modified
            # instances
            lifecycle.install_node_instances(
                graph=graph,
                node_instances=to_install,
                related_nodes=set(
                    instances_by_change['added_target_instances_ids'][1]))

            # Establishing relationships should also use modified instances.
            lifecycle.execute_establish_relationships(
                graph=graph,
                node_instances=set(
                    instances_by_change['extended_and_target_instances'][1]),
                modified_relationship_ids=modified_entity_ids['relationship'])
Example #3
def scale_entity(ctx,
                 scalable_entity_name,
                 delta,
                 scale_compute,
                 ignore_failure=False,
                 **kwargs):
    """Scales in/out the subgraph of scalable_entity_name.

    If a node name is passed, and `scale_compute` is set to false, the
    subgraph will consist of all the nodes that are contained in the node and
    the node itself.
    If a node name is passed, and `scale_compute` is set to true, the subgraph
    will consist of all nodes that are contained in the compute node that
    contains the node and the compute node itself.
    If a group name or a node that is not contained in a compute
    node is passed, this property is ignored.

    `delta` is used to specify the scale factor.
    For `delta > 0`: If current number of instances is `N`, scale out to
    `N + delta`.
    For `delta < 0`: If current number of instances is `N`, scale in to
    `N - |delta|`.

    :param ctx: cloudify context
    :param scalable_entity_name: the node or group name to scale
    :param delta: scale in/out factor
    :param scale_compute: should scale apply on compute node containing
                          the specified node
    :param ignore_failure: ignore operations failures in uninstall workflow
    """
    if isinstance(delta, basestring):
        try:
            delta = int(delta)
        except ValueError:
            raise ValueError('The delta parameter must be a number. Got: {0}'
                             .format(delta))

    if delta == 0:
        ctx.logger.info('delta parameter is 0, so no scaling will take place.')
        return

    scaling_group = ctx.deployment.scaling_groups.get(scalable_entity_name)
    if scaling_group:
        curr_num_instances = scaling_group['properties']['current_instances']
        planned_num_instances = curr_num_instances + delta
        scale_id = scalable_entity_name
    else:
        node = ctx.get_node(scalable_entity_name)
        if not node:
            raise ValueError("No scalable entity named {0} was found".format(
                scalable_entity_name))
        host_node = node.host_node
        scaled_node = host_node if (scale_compute and host_node) else node
        curr_num_instances = scaled_node.number_of_instances
        planned_num_instances = curr_num_instances + delta
        scale_id = scaled_node.id

    if planned_num_instances < 0:
        raise ValueError('Provided delta: {0} is illegal. current number of '
                         'instances of entity {1} is {2}'
                         .format(delta,
                                 scalable_entity_name,
                                 curr_num_instances))
    modification = ctx.deployment.start_modification({
        scale_id: {
            'instances': planned_num_instances

            # The following parameters are not exposed at the moment,
            # but should be used to control which node instances get scaled in
            # (when scaling in).
            # They are mentioned here because, currently, the modification API
            # is not well documented.
            # Special care should be taken because if `scale_compute == True`
            # (which is the default), then these ids should be the compute node
            # instance ids which are not necessarily instances of the node
            # specified by `scalable_entity_name`.

            # Node instances denoted by these instance ids should be *kept* if
            # possible.
            # 'removed_ids_exclude_hint': [],

            # Node instances denoted by these instance ids should be *removed*
            # if possible.
            # 'removed_ids_include_hint': []
        }
    })
    graph = ctx.graph_mode()
    try:
        ctx.logger.info('Deployment modification started. '
                        '[modification_id={0}]'.format(modification.id))
        if delta > 0:
            added_and_related = set(modification.added.node_instances)
            added = set(i for i in added_and_related
                        if i.modification == 'added')
            related = added_and_related - added
            try:
                lifecycle.install_node_instances(
                    graph=graph,
                    node_instances=added,
                    related_nodes=related)
            except Exception:
                ctx.logger.error('Scale out failed, scaling back in.')
                for task in graph.tasks_iter():
                    graph.remove_task(task)
                lifecycle.uninstall_node_instances(
                    graph=graph,
                    node_instances=added,
                    ignore_failure=ignore_failure,
                    related_nodes=related)
                raise
        else:
            removed_and_related = set(modification.removed.node_instances)
            removed = set(i for i in removed_and_related
                          if i.modification == 'removed')
            related = removed_and_related - removed
            lifecycle.uninstall_node_instances(
                graph=graph,
                node_instances=removed,
                ignore_failure=ignore_failure,
                related_nodes=related)
    except Exception:
        ctx.logger.warn('Rolling back deployment modification. '
                        '[modification_id={0}]'.format(modification.id))
        try:
            modification.rollback()
        except Exception:
            ctx.logger.warn('Deployment modification rollback failed. The '
                            'deployment model is most likely in some corrupted'
                            ' state. '
                            '[modification_id={0}]'.format(modification.id))
            raise
        raise
    else:
        try:
            modification.finish()
        except Exception:
            ctx.logger.warn('Deployment modification finish failed. The '
                            'deployment model is most likely in some corrupted'
                            ' state. '
                            '[modification_id={0}]'.format(modification.id))
            raise
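
For context, a sketch (not taken from the original source) of how a scale workflow built on scale_entity is typically started through the Cloudify REST client; the manager address, deployment id and parameter values below are purely illustrative.

from cloudify_rest_client import CloudifyClient

client = CloudifyClient(host='localhost')   # illustrative manager address
client.executions.start(
    deployment_id='my_deployment',          # illustrative deployment id
    workflow_id='scale',
    parameters={
        'scalable_entity_name': 'vm',       # illustrative node/group name
        'delta': 1,
        'scale_compute': False,
    })
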
Example #4
def update(ctx,
           update_id,
           added_instance_ids,
           added_target_instances_ids,
           removed_instance_ids,
           remove_target_instance_ids,
           modified_entity_ids,
           extended_instance_ids,
           extend_target_instance_ids,
           reduced_instance_ids,
           reduce_target_instance_ids,
           skip_install,
           skip_uninstall,
           ignore_failure=False):
    instances_by_change = {
        'added_instances': (added_instance_ids, []),
        'added_target_instances_ids': (added_target_instances_ids, []),
        'removed_instances': (removed_instance_ids, []),
        'remove_target_instance_ids': (remove_target_instance_ids, []),
        'extended_and_target_instances':
            (extended_instance_ids + extend_target_instance_ids, []),
        'reduced_and_target_instances':
            (reduced_instance_ids + reduce_target_instance_ids, []),
    }

    for instance in ctx.node_instances:

        instance_holders = \
            [instance_holder
             for _, (changed_ids, instance_holder)
             in instances_by_change.iteritems()
             if instance.id in changed_ids]

        for instance_holder in instance_holders:
            instance_holder.append(instance)

    if not skip_install:
        graph = ctx.graph_mode()
        # Adding nodes or node instances should be based on modified instances
        lifecycle.install_node_instances(
            graph=graph,
            node_instances=set(instances_by_change['added_instances'][1]),
            related_nodes=set(instances_by_change['added_target_instances_ids']
                              [1]))

        # Establishing relationships should also use modified instances.
        lifecycle.execute_establish_relationships(
            graph=ctx.graph_mode(),
            node_instances=set(
                    instances_by_change['extended_and_target_instances'][1]),
            modified_relationship_ids=modified_entity_ids['relationship']
        )

    if not skip_uninstall:
        graph = ctx.graph_mode()

        lifecycle.execute_unlink_relationships(
            graph=graph,
            node_instances=set(
                    instances_by_change['reduced_and_target_instances'][1]),
            modified_relationship_ids=modified_entity_ids['relationship']
        )

        lifecycle.uninstall_node_instances(
            graph=graph,
            node_instances=set(instances_by_change['removed_instances'][1]),
            ignore_failure=ignore_failure,
            related_nodes=set(
                    instances_by_change['remove_target_instance_ids'][1])
        )

    # Finalize the commit (i.e. remove relationships or nodes)
    client = get_rest_client()
    client.deployment_updates.finalize_commit(update_id)
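
The update workflow above calls get_rest_client() and uses the lifecycle module without defining or importing them in the snippet; a minimal sketch of the imports it presumably relies on (the exact module paths are an assumption based on the public Cloudify plugin APIs):

from cloudify.manager import get_rest_client
from cloudify.plugins import lifecycle
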
Example #5
def install(ctx, **kwargs):
    """Default install workflow"""
    lifecycle.install_node_instances(
        graph=ctx.graph_mode(),
        node_instances=set(ctx.node_instances))
Example #6
def install(ctx, **kwargs):
    """Default install workflow"""
    lifecycle.install_node_instances(graph=ctx.graph_mode(),
                                     node_instances=set(ctx.node_instances))
Example #7
def scale_entity(ctx,
                 scalable_entity_name,
                 delta,
                 scale_compute,
                 ignore_failure=False,
                 include_instances=None,
                 exclude_instances=None,
                 rollback_if_failed=True,
                 **kwargs):
    """Scales in/out the subgraph of scalable_entity_name.

    If a node name is passed, and `scale_compute` is set to false, the
    subgraph will consist of all the nodes that are contained in the node and
    the node itself.
    If a node name is passed, and `scale_compute` is set to true, the subgraph
    will consist of all nodes that are contained in the compute node that
    contains the node and the compute node itself.
    If a group name or a node that is not contained in a compute
    node is passed, this property is ignored.

    `delta` is used to specify the scale factor.
    For `delta > 0`: If current number of instances is `N`, scale out to
    `N + delta`.
    For `delta < 0`: If current number of instances is `N`, scale in to
    `N - |delta|`.

    :param ctx: cloudify context
    :param scalable_entity_name: the node or group name to scale
    :param delta: scale in/out factor
    :param scale_compute: should scale apply on compute node containing
                          the specified node
    :param ignore_failure: ignore operations failures in uninstall workflow
    :param include_instances: Instances to include when scaling down
    :param exclude_instances: Instances to exclude when scaling down
    :param rollback_if_failed: when False, no rollback will be triggered.
    """
    include_instances = include_instances or []
    exclude_instances = exclude_instances or []
    if isinstance(include_instances, basestring):
        include_instances = [include_instances]
    if isinstance(exclude_instances, basestring):
        exclude_instances = [exclude_instances]
    include_instances = [str(inst) for inst in include_instances]
    exclude_instances = [str(inst) for inst in exclude_instances]

    if isinstance(delta, basestring):
        try:
            delta = int(delta)
        except ValueError:
            raise ValueError(
                'The delta parameter must be a number. Got: {0}'.format(delta))

    if delta == 0:
        ctx.logger.info('delta parameter is 0, so no scaling will take place.')
        return

    if delta > 0 and (include_instances or exclude_instances):
        raise ValueError(
            'Instances cannot be included or excluded when scaling up.')

    scaling_group = ctx.deployment.scaling_groups.get(scalable_entity_name)
    if scaling_group:
        groups_members = get_groups_with_members(ctx)
        # Available instances for checking inclusions/exclusions need to
        # include all groups and their members
        available_instances = set(chain.from_iterable(
            groups_members.values())).union(groups_members)
        validate_inclusions_and_exclusions(
            include_instances,
            exclude_instances,
            available_instances=available_instances,
            delta=delta,
            scale_compute=scale_compute,
            groups_members=groups_members,
        )
        curr_num_instances = scaling_group['properties']['current_instances']
        planned_num_instances = curr_num_instances + delta
        scale_id = scalable_entity_name
    else:
        node = ctx.get_node(scalable_entity_name)
        if not node:
            raise ValueError("No scalable entity named {0} was found".format(
                scalable_entity_name))
        validate_inclusions_and_exclusions(
            include_instances,
            exclude_instances,
            available_instances=[instance.id for instance in node.instances],
            delta=delta,
            scale_compute=scale_compute,
        )
        host_node = node.host_node
        scaled_node = host_node if (scale_compute and host_node) else node
        curr_num_instances = scaled_node.number_of_instances
        planned_num_instances = curr_num_instances + delta
        scale_id = scaled_node.id

    if planned_num_instances < 0:
        raise ValueError('Provided delta: {0} is illegal. current number of '
                         'instances of entity {1} is {2}'.format(
                             delta, scalable_entity_name, curr_num_instances))
    modification = ctx.deployment.start_modification({
        scale_id: {
            'instances': planned_num_instances,
            'removed_ids_exclude_hint': exclude_instances,
            'removed_ids_include_hint': include_instances,

            # While these parameters are now exposed, this comment is being
            # kept as it provides useful insight into the hints
            # The following parameters are not exposed at the moment,
            # but should be used to control which node instances get scaled in
            # (when scaling in).
            # They are mentioned here because, currently, the modification API
            # is not well documented.
            # Special care should be taken because if `scale_compute == True`
            # (which is the default), then these ids should be the compute node
            # instance ids which are not necessarily instances of the node
            # specified by `scalable_entity_name`.

            # Node instances denoted by these instance ids should be *kept* if
            # possible.
            # 'removed_ids_exclude_hint': [],

            # Node instances denoted by these instance ids should be *removed*
            # if possible.
            # 'removed_ids_include_hint': []
        }
    })
    # refresh node instances so that workflow_ctx.get_node_instance() can
    # return the new node instances that were just created
    ctx.refresh_node_instances()
    graph = ctx.graph_mode()
    try:
        ctx.logger.info('Deployment modification started. '
                        '[modification_id={0}]'.format(modification.id))
        if delta > 0:
            added_and_related = set(modification.added.node_instances)
            added = set(i for i in added_and_related
                        if i.modification == 'added')
            related = added_and_related - added
            try:
                lifecycle.install_node_instances(graph=graph,
                                                 node_instances=added,
                                                 related_nodes=related)
            except Exception:
                if not rollback_if_failed:
                    ctx.logger.error('Scale out failed.')
                    raise

                ctx.logger.error('Scale out failed, scaling back in.')
                for task in graph.tasks_iter():
                    graph.remove_task(task)
                lifecycle.uninstall_node_instances(
                    graph=graph,
                    node_instances=added,
                    ignore_failure=ignore_failure,
                    related_nodes=related)
                raise
        else:
            removed_and_related = set(modification.removed.node_instances)
            removed = set(i for i in removed_and_related
                          if i.modification == 'removed')
            related = removed_and_related - removed
            lifecycle.uninstall_node_instances(graph=graph,
                                               node_instances=removed,
                                               ignore_failure=ignore_failure,
                                               related_nodes=related)
    except Exception:
        if not rollback_if_failed:
            raise

        ctx.logger.warn('Rolling back deployment modification. '
                        '[modification_id={0}]'.format(modification.id))
        try:
            modification.rollback()
        except Exception:
            ctx.logger.warn('Deployment modification rollback failed. The '
                            'deployment model is most likely in some corrupted'
                            ' state. '
                            '[modification_id={0}]'.format(modification.id))
            raise
        raise
    else:
        try:
            modification.finish()
        except Exception:
            ctx.logger.warn('Deployment modification finish failed. The '
                            'deployment model is most likely in some corrupted'
                            ' state. '
                            '[modification_id={0}]'.format(modification.id))
            raise
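
A hypothetical direct call to the variant above, scaling the illustrative node 'vm' in by one while hinting which instance should be removed (the node name and instance id are made up for the example):

scale_entity(ctx,
             scalable_entity_name='vm',
             delta=-1,
             scale_compute=False,
             ignore_failure=False,
             include_instances=['vm_abc123'],  # prefer to remove this instance
             exclude_instances=[],
             rollback_if_failed=True)
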
Example #8
def scale(ctx, node_id, delta, scale_compute, **kwargs):
    """Scales in/out the subgraph of node_id.

    If `scale_compute` is set to false, the subgraph will consist of all
    the nodes that are contained in `node_id` and `node_id` itself.
    If `scale_compute` is set to true, the subgraph will consist of all
    nodes that are contained in the compute node that contains `node_id`
    and the compute node itself.
    If `node_id` is not contained in a compute node and is not a compute node,
    this property is ignored.

    `delta` is used to specify the scale factor.
    For `delta > 0`: If current number of instances is `N`, scale out to
    `N + delta`.
    For `delta < 0`: If current number of instances is `N`, scale in to
    `N - |delta|`.

    :param ctx: cloudify context
    :param node_id: the node_id to scale
    :param delta: scale in/out factor
    :param scale_compute: should scale apply on compute node containing
                          'node_id'
    """
    graph = ctx.graph_mode()
    node = ctx.get_node(node_id)
    if not node:
        raise ValueError("Node {0} doesn't exist".format(node_id))
    if delta == 0:
        ctx.logger.info('delta parameter is 0, so no scaling will take place.')
        return
    host_node = node.host_node
    scaled_node = host_node if (scale_compute and host_node) else node
    curr_num_instances = scaled_node.number_of_instances
    planned_num_instances = curr_num_instances + delta
    if planned_num_instances < 0:
        raise ValueError('Provided delta: {0} is illegal. current number of '
                         'instances of node {1} is {2}'
                         .format(delta, node_id, curr_num_instances))

    modification = ctx.deployment.start_modification({
        scaled_node.id: {
            'instances': planned_num_instances

            # The following parameters are not exposed at the moment,
            # but should be used to control which node instances get scaled in
            # (when scaling in).
            # They are mentioned here because, currently, the modification API
            # is not well documented.
            # Special care should be taken because if `scale_compute == True`
            # (which is the default), then these ids should be the compute node
            # instance ids which are not necessarily instances of the node
            # specified by `node_id`.

            # Node instances denoted by these instance ids should be *kept* if
            # possible.
            # 'removed_ids_exclude_hint': [],

            # Node instances denoted by these instance ids should be *removed*
            # if possible.
            # 'removed_ids_include_hint': []
        }
    })
    try:
        ctx.logger.info('Deployment modification started. '
                        '[modification_id={0}]'.format(modification.id))
        if delta > 0:
            added_and_related = set(modification.added.node_instances)
            added = set(i for i in added_and_related
                        if i.modification == 'added')
            related = added_and_related - added
            try:
                lifecycle.install_node_instances(
                    graph=graph,
                    node_instances=added,
                    intact_nodes=related)
            except:
                ctx.logger.error('Scale out failed, scaling back in.')
                for task in graph.tasks_iter():
                    graph.remove_task(task)
                lifecycle.uninstall_node_instances(
                    graph=graph,
                    node_instances=added,
                    intact_nodes=related)
                raise
        else:
            removed_and_related = set(modification.removed.node_instances)
            removed = set(i for i in removed_and_related
                          if i.modification == 'removed')
            related = removed_and_related - removed
            lifecycle.uninstall_node_instances(
                graph=graph,
                node_instances=removed,
                intact_nodes=related)
    except:
        ctx.logger.warn('Rolling back deployment modification. '
                        '[modification_id={0}]'.format(modification.id))
        try:
            modification.rollback()
        except:
            ctx.logger.warn('Deployment modification rollback failed. The '
                            'deployment model is most likely in some corrupted'
                            ' state. '
                            '[modification_id={0}]'.format(modification.id))
            raise
        raise
    else:
        try:
            modification.finish()
        except:
            ctx.logger.warn('Deployment modification finish failed. The '
                            'deployment model is most likely in some corrupted'
                            ' state. '
                            '[modification_id={0}]'.format(modification.id))
            raise
Example #9
def scale(ctx, node_id, delta, scale_compute, **kwargs):
    """Scales in/out the subgraph of node_id.

    If `scale_compute` is set to false, the subgraph will consist of all
    the nodes that are contained in `node_id` and `node_id` itself.
    If `scale_compute` is set to true, the subgraph will consist of all
    nodes that are contained in the compute node that contains `node_id`
    and the compute node itself.
    If `node_id` is not contained in a compute node and is not a compute node,
    this property is ignored.

    `delta` is used to specify the scale factor.
    For `delta > 0`: If current number of instances is `N`, scale out to
    `N + delta`.
    For `delta < 0`: If current number of instances is `N`, scale in to
    `N - |delta|`.

    :param ctx: cloudify context
    :param node_id: the node_id to scale
    :param delta: scale in/out factor
    :param scale_compute: should scale apply on compute node containing
                          'node_id'
    """
    graph = ctx.graph_mode()
    node = ctx.get_node(node_id)
    if not node:
        raise ValueError("Node {0} doesn't exist".format(node_id))
    if delta == 0:
        ctx.logger.info('delta parameter is 0, so no scaling will take place.')
        return
    host_node = node.host_node
    scaled_node = host_node if (scale_compute and host_node) else node
    curr_num_instances = scaled_node.number_of_instances
    planned_num_instances = curr_num_instances + delta
    if planned_num_instances < 0:
        raise ValueError('Provided delta: {0} is illegal. current number of '
                         'instances of node {1} is {2}'.format(
                             delta, node_id, curr_num_instances))

    modification = ctx.deployment.start_modification({
        scaled_node.id: {
            'instances': planned_num_instances

            # The following parameters are not exposed at the moment,
            # but should be used to control which node instances get scaled in
            # (when scaling in).
            # They are mentioned here because, currently, the modification API
            # is not well documented.
            # Special care should be taken because if `scale_compute == True`
            # (which is the default), then these ids should be the compute node
            # instance ids which are not necessarily instances of the node
            # specified by `node_id`.

            # Node instances denoted by these instance ids should be *kept* if
            # possible.
            # 'removed_ids_exclude_hint': [],

            # Node instances denoted by these instance ids should be *removed*
            # if possible.
            # 'removed_ids_include_hint': []
        }
    })
    try:
        ctx.logger.info('Deployment modification started. '
                        '[modification_id={0}]'.format(modification.id))
        if delta > 0:
            added_and_related = set(modification.added.node_instances)
            added = set(i for i in added_and_related
                        if i.modification == 'added')
            related = added_and_related - added
            try:
                lifecycle.install_node_instances(graph=graph,
                                                 node_instances=added,
                                                 intact_nodes=related)
            except:
                ctx.logger.error('Scale out failed, scaling back in.')
                for task in graph.tasks_iter():
                    graph.remove_task(task)
                lifecycle.uninstall_node_instances(graph=graph,
                                                   node_instances=added,
                                                   intact_nodes=related)
                raise
        else:
            removed_and_related = set(modification.removed.node_instances)
            removed = set(i for i in removed_and_related
                          if i.modification == 'removed')
            related = removed_and_related - removed
            lifecycle.uninstall_node_instances(graph=graph,
                                               node_instances=removed,
                                               intact_nodes=related)
    except:
        ctx.logger.warn('Rolling back deployment modification. '
                        '[modification_id={0}]'.format(modification.id))
        try:
            modification.rollback()
        except:
            ctx.logger.warn('Deployment modification rollback failed. The '
                            'deployment model is most likely in some corrupted'
                            ' state. '
                            '[modification_id={0}]'.format(modification.id))
            raise
        raise
    else:
        try:
            modification.finish()
        except:
            ctx.logger.warn('Deployment modification finish failed. The '
                            'deployment model is most likely in some corrupted'
                            ' state. '
                            '[modification_id={0}]'.format(modification.id))
            raise
Example #10
def _run_scale_settings(ctx,
                        scale_settings,
                        scalable_entity_properties,
                        scale_transaction_field=None,
                        scale_transaction_value=None,
                        ignore_failure=False,
                        ignore_rollback_failure=True,
                        instances_remove_ids=None,
                        node_sequence=None):
    modification = ctx.deployment.start_modification(scale_settings)
    graph = ctx.graph_mode()
    try:
        ctx.logger.info('Deployment modification started. '
                        '[modification_id={0}]'.format(modification.id))
        if len(set(modification.added.node_instances)):
            ctx.logger.info('Added: {}'.format(
                repr([
                    node_instance._node_instance.id
                    for node_instance in modification.added.node_instances
                    if node_instance.modification == 'added'
                ])))
            added_and_related = set(modification.added.node_instances)
            added = set(i for i in added_and_related
                        if i.modification == 'added')
            related = added_and_related - added
            try:
                for node_instance in added:
                    properties_updates = scalable_entity_properties.get(
                        node_instance._node_instance.node_id, {})
                    # save properties updates
                    properties = {}
                    if properties_updates:
                        # pop one dict for runtime properties
                        properties.update(properties_updates.pop())
                    # save transaction list
                    if scale_transaction_field:
                        # save original set of instances in scale up.
                        if scale_transaction_value:
                            properties.update({
                                scale_transaction_field:
                                scale_transaction_value
                            })
                        else:
                            properties.update(
                                {scale_transaction_field: modification.id})
                    # check properties to update
                    if properties:
                        ctx.logger.debug(
                            "{}: Updating {} runtime properties by {}".format(
                                node_instance._node_instance.node_id,
                                node_instance._node_instance.id,
                                repr(properties)))
                        _update_runtime_properties(
                            ctx, node_instance._node_instance.id, properties)
                if node_sequence:
                    subgraph_func = lifecycle.install_node_instance_subgraph
                    _process_node_instances(
                        ctx=ctx,
                        graph=graph,
                        node_instances=added,
                        ignore_failure=ignore_failure,
                        node_instance_subgraph_func=subgraph_func,
                        node_sequence=node_sequence)
                else:
                    lifecycle.install_node_instances(graph=graph,
                                                     node_instances=added,
                                                     related_nodes=related)
            except Exception as ex:
                ctx.logger.error(
                    'Scale out failed, scaling back in. {}'.format(repr(ex)))
                _uninstall_instances(ctx=ctx,
                                     graph=graph,
                                     removed=added,
                                     related=related,
                                     ignore_failure=ignore_rollback_failure,
                                     node_sequence=node_sequence)
                raise ex

        if len(set(modification.removed.node_instances)):
            ctx.logger.info('Removed: {}'.format(
                repr([
                    node_instance._node_instance.id
                    for node_instance in modification.removed.node_instances
                    if node_instance.modification == 'removed'
                ])))
            removed_and_related = set(modification.removed.node_instances)
            removed = set(i for i in removed_and_related
                          if i.modification == 'removed')
            ctx.logger.info('Proposed: {}'.format(repr(instances_remove_ids)))
            if instances_remove_ids:
                for instance in removed:
                    if instance._node_instance.id not in instances_remove_ids:
                        raise Exception(
                            "Instance {} not in proposed list {}.".format(
                                repr(instance._node_instance.id),
                                repr(instances_remove_ids)))
            related = removed_and_related - removed
            _uninstall_instances(ctx=ctx,
                                 graph=graph,
                                 removed=removed,
                                 ignore_failure=ignore_failure,
                                 related=related,
                                 node_sequence=node_sequence)
    except Exception as ex:
        ctx.logger.warn('Rolling back deployment modification. '
                        '[modification_id={0}]: {1}'.format(
                            modification.id, repr(ex)))
        try:
            deadline = time.time() + ctx.wait_after_fail
        except AttributeError:
            deadline = time.time() + 1800
        while deadline > time.time():
            if graph._is_execution_cancelled():
                raise api.ExecutionCancelled()
            for task in graph._terminated_tasks():
                graph._handle_terminated_task(task)
            if not any(task.get_state() == tasks.TASK_SENT
                       for task in graph.tasks_iter()):
                break
            else:
                time.sleep(0.1)
        modification.rollback()
        raise ex
    else:
        modification.finish()
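
A sketch of how a caller might assemble arguments for _run_scale_settings: the scale_settings shape mirrors what ctx.deployment.start_modification receives in the other examples, while the node name, property values and transaction field below are illustrative assumptions.

scale_settings = {
    'vm': {
        'instances': 3,
        # the same optional hints shown in the other examples:
        # 'removed_ids_exclude_hint': [],
        # 'removed_ids_include_hint': [],
    }
}
scalable_entity_properties = {
    # a list of dicts per node id; each newly added instance of that node
    # pops one dict and has it written into its runtime properties
    'vm': [{'resource_id': 'vm-1'}],
}
_run_scale_settings(ctx,
                    scale_settings,
                    scalable_entity_properties,
                    scale_transaction_field='_transaction_id',
                    node_sequence=None)
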
Example #11
def _run_scale_settings(ctx, scale_settings, scalable_entity_properties,
                        scale_transaction_field=None,
                        scale_transaction_value=None,
                        ignore_failure=False,
                        instances_remove_ids=None):
    modification = ctx.deployment.start_modification(scale_settings)
    graph = ctx.graph_mode()
    try:
        ctx.logger.info('Deployment modification started. '
                        '[modification_id={0}]'.format(modification.id))
        if len(set(modification.added.node_instances)):
            ctx.logger.info('Added: {}'.format(repr([
                node_instance._node_instance.id
                for node_instance in modification.added.node_instances
                if node_instance.modification == 'added'
            ])))
            added_and_related = set(modification.added.node_instances)
            added = set(i for i in added_and_related
                        if i.modification == 'added')
            related = added_and_related - added
            try:
                for node_instance in added:
                    properties_updates = scalable_entity_properties.get(
                        node_instance._node_instance.node_id, {})
                    # save properties updates
                    properties = {}
                    if properties_updates:
                        # pop one dict for runtime properties
                        properties.update(properties_updates.pop())
                    # save transaction list
                    if scale_transaction_field:
                        # save original set of instances in scale up.
                        if scale_transaction_value:
                            properties.update({
                                scale_transaction_field:
                                    scale_transaction_value
                            })
                        else:
                            properties.update({
                                scale_transaction_field: modification.id
                            })
                    # check properties to update
                    if properties:
                        ctx.logger.debug(
                            "{}: Updating {} runtime properties by {}".format(
                                node_instance._node_instance.node_id,
                                node_instance._node_instance.id,
                                repr(properties)))
                        _update_runtime_properties(
                            ctx, node_instance._node_instance.id, properties)
                lifecycle.install_node_instances(
                    graph=graph,
                    node_instances=added,
                    related_nodes=related)
            except Exception as ex:
                ctx.logger.error('Scale out failed, scaling back in. {}'
                                 .format(repr(ex)))
                _uninstall_instances(
                    ctx, graph, [
                        node_instance._node_instance.id
                        for node_instance in added
                    ], [
                        node_instance._node_instance.id
                        for node_instance in related
                    ], ignore_failure)
                raise ex

        if len(set(modification.removed.node_instances)):
            ctx.logger.info('Removed: {}'.format(repr([
                node_instance._node_instance.id
                for node_instance in modification.removed.node_instances
                if node_instance.modification == 'removed'
            ])))
            removed_and_related = set(modification.removed.node_instances)
            removed = set(i for i in removed_and_related
                          if i.modification == 'removed')
            ctx.logger.info('Proposed: {}'
                            .format(repr(instances_remove_ids)))
            if instances_remove_ids:
                for instance in removed:
                    if instance._node_instance.id not in instances_remove_ids:
                        raise Exception(
                            "Instance {} not in proposed list {}.".format(
                                repr(instance._node_instance.id),
                                repr(instances_remove_ids)
                            )
                        )
            related = removed_and_related - removed
            lifecycle.uninstall_node_instances(
                graph=graph,
                node_instances=removed,
                ignore_failure=ignore_failure,
                related_nodes=related)
    except Exception as ex:
        ctx.logger.warn('Rolling back deployment modification. '
                        '[modification_id={0}]: {1}'
                        .format(modification.id, repr(ex)))
        modification.rollback()
        raise ex
    else:
        modification.finish()
Example #12
def scale_entity(ctx,
                 scalable_entity_name,
                 delta,
                 scale_compute,
                 ignore_failure=False,
                 **kwargs):
    """Scales in/out the subgraph of scalable_entity_name.

    If a node name is passed, and `scale_compute` is set to false, the
    subgraph will consist of all the nodes that are contained in the node and
    the node itself.
    If a node name is passed, and `scale_compute` is set to true, the subgraph
    will consist of all nodes that are contained in the compute node that
    contains the node and the compute node itself.
    If a group name or a node that is not contained in a compute
    node is passed, this property is ignored.

    `delta` is used to specify the scale factor.
    For `delta > 0`: If current number of instances is `N`, scale out to
    `N + delta`.
    For `delta < 0`: If current number of instances is `N`, scale in to
    `N - |delta|`.

    :param ctx: cloudify context
    :param scalable_entity_name: the node or group name to scale
    :param delta: scale in/out factor
    :param scale_compute: should scale apply on compute node containing
                          the specified node
    :param ignore_failure: ignore operations failures in uninstall workflow
    """
    if isinstance(delta, basestring):
        try:
            delta = int(delta)
        except ValueError:
            raise ValueError(
                'The delta parameter must be a number. Got: {0}'.format(delta))

    if delta == 0:
        ctx.logger.info('delta parameter is 0, so no scaling will take place.')
        return

    scaling_group = ctx.deployment.scaling_groups.get(scalable_entity_name)
    if scaling_group:
        curr_num_instances = scaling_group['properties']['current_instances']
        planned_num_instances = curr_num_instances + delta
        scale_id = scalable_entity_name
    else:
        node = ctx.get_node(scalable_entity_name)
        if not node:
            raise ValueError("No scalable entity named {0} was found".format(
                scalable_entity_name))
        host_node = node.host_node
        scaled_node = host_node if (scale_compute and host_node) else node
        curr_num_instances = scaled_node.number_of_instances
        planned_num_instances = curr_num_instances + delta
        scale_id = scaled_node.id

    if planned_num_instances < 0:
        raise ValueError('Provided delta: {0} is illegal. current number of '
                         'instances of entity {1} is {2}'.format(
                             delta, scalable_entity_name, curr_num_instances))
    modification = ctx.deployment.start_modification({
        scale_id: {
            'instances': planned_num_instances

            # The following parameters are not exposed at the moment,
            # but should be used to control which node instances get scaled in
            # (when scaling in).
            # They are mentioned here because, currently, the modification API
            # is not well documented.
            # Special care should be taken because if `scale_compute == True`
            # (which is the default), then these ids should be the compute node
            # instance ids which are not necessarily instances of the node
            # specified by `scalable_entity_name`.

            # Node instances denoted by these instance ids should be *kept* if
            # possible.
            # 'removed_ids_exclude_hint': [],

            # Node instances denoted by these instance ids should be *removed*
            # if possible.
            # 'removed_ids_include_hint': []
        }
    })
    graph = ctx.graph_mode()
    try:
        ctx.logger.info('Deployment modification started. '
                        '[modification_id={0}]'.format(modification.id))
        if delta > 0:
            added_and_related = set(modification.added.node_instances)
            added = set(i for i in added_and_related
                        if i.modification == 'added')
            related = added_and_related - added
            try:
                lifecycle.install_node_instances(graph=graph,
                                                 node_instances=added,
                                                 related_nodes=related)
            except Exception:
                ctx.logger.error('Scale out failed, scaling back in.')
                for task in graph.tasks_iter():
                    graph.remove_task(task)
                lifecycle.uninstall_node_instances(
                    graph=graph,
                    node_instances=added,
                    ignore_failure=ignore_failure,
                    related_nodes=related)
                raise
        else:
            removed_and_related = set(modification.removed.node_instances)
            removed = set(i for i in removed_and_related
                          if i.modification == 'removed')
            related = removed_and_related - removed
            lifecycle.uninstall_node_instances(graph=graph,
                                               node_instances=removed,
                                               ignore_failure=ignore_failure,
                                               related_nodes=related)
    except Exception:
        ctx.logger.warn('Rolling back deployment modification. '
                        '[modification_id={0}]'.format(modification.id))
        try:
            modification.rollback()
        except Exception:
            ctx.logger.warn('Deployment modification rollback failed. The '
                            'deployment model is most likely in some corrupted'
                            ' state. '
                            '[modification_id={0}]'.format(modification.id))
            raise
        raise
    else:
        try:
            modification.finish()
        except Exception:
            ctx.logger.warn('Deployment modification finish failed. The '
                            'deployment model is most likely in some corrupted'
                            ' state. '
                            '[modification_id={0}]'.format(modification.id))
            raise
Example #13
def update(ctx,
           update_id,
           added_instance_ids,
           added_target_instances_ids,
           removed_instance_ids,
           remove_target_instance_ids,
           modified_entity_ids,
           extended_instance_ids,
           extend_target_instance_ids,
           reduced_instance_ids,
           reduce_target_instance_ids,
           skip_install,
           skip_uninstall,
           ignore_failure=False):
    instances_by_change = {
        'added_instances': (added_instance_ids, []),
        'added_target_instances_ids': (added_target_instances_ids, []),
        'removed_instances': (removed_instance_ids, []),
        'remove_target_instance_ids': (remove_target_instance_ids, []),
        'extended_and_target_instances':
            (extended_instance_ids + extend_target_instance_ids, []),
        'reduced_and_target_instances':
            (reduced_instance_ids + reduce_target_instance_ids, []),
    }

    for instance in ctx.node_instances:

        instance_holders = \
            [instance_holder
             for _, (changed_ids, instance_holder)
             in instances_by_change.iteritems()
             if instance.id in changed_ids]

        for instance_holder in instance_holders:
            instance_holder.append(instance)

    if not skip_install:
        graph = ctx.graph_mode()
        # Adding nodes or node instances should be based on modified instances
        lifecycle.install_node_instances(
            graph=graph,
            node_instances=set(instances_by_change['added_instances'][1]),
            related_nodes=set(instances_by_change['added_target_instances_ids']
                              [1]))

        # Establishing relationships should also use modified instances.
        lifecycle.execute_establish_relationships(
            graph=ctx.graph_mode(),
            node_instances=set(
                    instances_by_change['extended_and_target_instances'][1]),
            modified_relationship_ids=modified_entity_ids['relationship']
        )

    if not skip_uninstall:
        graph = ctx.graph_mode()

        lifecycle.execute_unlink_relationships(
            graph=graph,
            node_instances=set(
                    instances_by_change['reduced_and_target_instances'][1]),
            modified_relationship_ids=modified_entity_ids['relationship']
        )

        lifecycle.uninstall_node_instances(
            graph=graph,
            node_instances=set(instances_by_change['removed_instances'][1]),
            ignore_failure=ignore_failure,
            related_nodes=set(
                    instances_by_change['remove_target_instance_ids'][1])
        )

    # Finalize the commit (i.e. remove relationships or nodes)
    client = get_rest_client()
    client.deployment_updates.finalize_commit(update_id)