Example no. 1
import re


def get_ip(master):
  node=ctx.get_node(master)
  if(ctx.local):
    return ctx.get_node(master).properties['ip']
  else:
    # use default instance ([0])
    instance=node.instances.next()._node_instance
    if(node.type=='cloudify.nodes.DeploymentProxy'):
      r=re.match('.*://(.*):(.*)',instance.runtime_properties['kubernetes_info']['url'])
      with open("/tmp/log","a") as f:
        f.write("  ip="+r.group(1))
      return(r.group(1))
    else:
      return(instance.runtime_properties['ip'])
Example no. 2
def run_all_operations(**_):
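    # run test.op on the first instance of node1, and on both sides of its first relationship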
    node = workflow_ctx.get_node('node1')
    instance = next(node.instances)
    instance.execute_operation('test.op')
    relationship = next(instance.relationships)
    relationship.execute_source_operation('test.op')
    relationship.execute_target_operation('test.op')
Example no. 3
def create_file(file_name, input_str, input_type, **kwargs):
    ctx.logger.info("In create_file {}".format(file_name))
    ctx.logger.info("input_str {}".format(input_str))
    ctx.logger.info("input_type {}".format(input_type))
    if input_type == 'name':
        node = ctx.get_node(input_str)
        ctx.logger.info(
            "*** create_file is about to be performed on node.id {}".format(
                node.id))
        for instance in node.instances:
            instance.execute_operation(
                "file.interfaces.action.createNewFile",
                kwargs={'process': {
                    'args': [file_name]
                }})
    else:
        for node in ctx.nodes:
            if input_str == node.type:
                ctx.logger.info(
                    "*** create_file is about to be performed on node.id {}".
                    format(node.id))
                for instance in node.instances:
                    instance.execute_operation(
                        "file.interfaces.action.createNewFile",
                        kwargs={'process': {
                            'args': [file_name]
                        }})

    ctx.logger.info("End of create_file")
Example no. 4
def run_security_update(security_update_number, input_str, input_type,
                        **kwargs):
    ctx.logger.info("In run_security_update {}".format(security_update_number))
    ctx.logger.info("input_str {}".format(input_str))
    ctx.logger.info("input_type {}".format(input_type))
    if input_type == 'name':
        node = ctx.get_node(input_str)
        ctx.logger.info(
            "*** run_security_update is about to be performed on node.id {}".
            format(node.id))
        for instance in node.instances:
            instance.execute_operation(
                "file.interfaces.action.runSecurityUpdate",
                kwargs={'process': {
                    'args': [security_update_number]
                }})
    else:
        for node in ctx.nodes:
            if input_str == node.type:
                ctx.logger.info(
                    "*** run_security_update is about to be performed on node.id {}"
                    .format(node.id))
                for instance in node.instances:
                    instance.execute_operation(
                        "file.interfaces.action.runSecurityUpdate",
                        kwargs={'process': {
                            'args': [security_update_number]
                        }})

    ctx.logger.info("End of run_security_update")
Example no. 5
def restart_vms(node_id, node_instance_id=None, my_input=None, **kwargs):
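    # restart instances of node_id by running utils.ops.restart_vm_op; if node_instance_id is given, only that instance is restarted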
    ctx.logger.info("restart_vms node_id {0}".format(node_id))
    node = ctx.get_node(node_id)
    if my_input is None:
        ctx.logger.info("my_input is None. Setting it to NA")
        my_input = "N/A"

    if node_instance_id is None:
        ctx.logger.info(
            "node_instance_id is None. Will run on all the instances of {0}".
            format(node_id))

    for instance in node.instances:
        instance_str = str(instance.id)
        ctx.logger.info("Checking instance.id : {0} of node {1}".format(
            instance_str, str(node_id)))
        if node_instance_id is None or node_instance_id == instance_str:
            ctx.logger.info(
                "Running execute_operation(utils.ops.restart_vm_op) on {0}".
                format(instance_str))
            instance.execute_operation(
                "utils.ops.restart_vm_op",
                kwargs={'process': {
                    'args': [my_input]
                }})
            ctx.logger.info(
                "Ran execute_operation(utils.ops.restart_vm_op) on {0}".format(
                    instance_str))
    ctx.logger.info("End of restart_vms")
Example no. 6
def kube_scale(**kwargs):
    setfabenv(kwargs)

    #get resource name
    nodename = kwargs['name']
    node = ctx.get_node(nodename)
    name = node.properties['name']
    amount = 0

    #if the request is an increment, get current value
    if (kwargs['amount'][0] == '+' or kwargs['amount'][0] == '-'):
        output = run(
            "./kubectl -s http://localhost:8080 get rc --no-headers {}".format(
                name))
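        # the current replica count is assumed to be in the fifth whitespace-separated column of the kubectl output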
        curinstances = int(output.stdout.split()[4])
        inc = int(kwargs['amount'])
        amount = curinstances + inc
    else:
        amount = int(kwargs['amount'])

    with open("/tmp/log", "a") as f:
        f.write(
            "running: ./kubectl -s http://localhost:8080 scale --replicas={} rc {}"
            .format(amount, name))
    run("./kubectl -s http://localhost:8080 scale --replicas={} rc {}".format(
        amount, name))
Example no. 7
def update_resource_in_rms(resource_type, quota, cost_per_unit, **kwargs):

    quota_and_price_str = "Quota: {0:,}, Unit price: ${1:,}".format(int(quota), int(cost_per_unit))
    message_to_rms = "Requesting RMS to add *{0}*. {1} ...".format(resource_type, quota_and_price_str)
    ctx.logger.info(message_to_rms)


    slack_node = ctx.get_node('slack_node')
    incoming_slack_webhook = slack_node.properties['incoming_slack_webhook']
    slack_channel_to_rms = slack_node.properties['slack_channel_to_rms']
    slack_channel_from_rms = slack_node.properties['slack_channel_from_rms']
    slack_failure_channel = slack_node.properties['slack_failure_channel']

    _send_slack_message(incoming_slack_webhook, slack_channel_to_rms, ctx.deployment.id, message_to_rms)

    OK_RESPONSE = "*OK*"
    FAILURE_RESPONSE = "{0}Failure{0}".format("`")
    #Dummy check against the RMS
    rnd_value = random.randint(1, 100)
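    # rnd_value > 30 simulates the RMS approving roughly 70% of requests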

    rms_response = OK_RESPONSE if rnd_value > 30 else FAILURE_RESPONSE
    message_from_rms = "The response from RMS for adding *{0}* is: {1}".format(resource_type, rms_response)
    _send_slack_message(incoming_slack_webhook, slack_channel_from_rms, ctx.deployment.id, message_from_rms)

    if rms_response == OK_RESPONSE:
        operations_msg = "Added *{0}* to the RMS. {1} ...".format(resource_type, quota_and_price_str)
        _send_slack_message(incoming_slack_webhook, "#operations", ctx.deployment.id, operations_msg)
    else:
        slack_failure_message = "`Failed` to add *{0}* to the RMS. {1} ...".format(resource_type, quota_and_price_str)
        _send_slack_message(incoming_slack_webhook, slack_failure_channel, ctx.deployment.id, slack_failure_message)

    ctx.logger.info("End of adding resource {0} to the RMS.".format(resource_type))
Example no. 8
def run_all_operations(**_):
    node = workflow_ctx.get_node('node1')
    instance = next(node.instances)
    instance.execute_operation('test.op')
    relationship = next(instance.relationships)
    relationship.execute_source_operation('test.op')
    relationship.execute_target_operation('test.op')
Example no. 9
def get_ip(master):
    node = ctx.get_node(master)
    if (ctx.local):
        return ctx.get_node(master).properties['ip']
    else:
        # use default instance ([0])
        instance = node.instances.next()._node_instance
        if (node.type == 'cloudify.nodes.DeploymentProxy'):
            r = re.match('.*://(.*):(.*)',
                         instance.runtime_properties['kubernetes_info']['url'])
            with open("/tmp/log", "a") as f:
                f.write("instance runtime properties:" +
                        str(instance.runtime_properties))
                f.write("  ip=" + r.group(1))
            return (r.group(1))
        else:
            return (instance.runtime_properties['ip'])
Example no. 10
def jenkins_run_cmd(cmd_name,arg_value="",key1_name="",key1_value="",**kwargs):
    ctx.logger.info("In jenkins_run_cmd {}".format(cmd_name))
    node = ctx.get_node('jenkins_app')
    ctx.logger.info("jenkins_run_cmd is about to exec on node.id {}".format(node.id))
	
    for instance in node.instances:
        instance.execute_operation("jenkins.interfaces.action.jenkins_cmd",
                                   kwargs={'process': {'args': [cmd_name,arg_value,key1_name,key1_value]}})
    ctx.logger.info("End of jenkins_run_cmd")
Example no. 11
def setfabenv(kwargs):
  fabenv={}
  master=get_ip(kwargs['master'])
  masternode=ctx.get_node(kwargs['master'])
  if(masternode.type=='cloudify.nodes.DeploymentProxy'):
    #grab proper ip, assumes relationship has copied properties to instance
    fabenv['host_string']=kwargs['ssh_user']+'@'+master
    fabenv['port']=kwargs.get('ssh_port','22')
    fabenv['user']=kwargs['ssh_user']
    fabenv['key_filename']=kwargs['ssh_keyfilename']
  else:
    #requires ssh info be defined on master node
    masternode=ctx.get_node(kwargs['master'])
    fabenv['host_string']=masternode.properties['ssh_user']+'@'+masternode.properties['ip']
    fabenv['port']=masternode.properties['ssh_port']
    fabenv['user']=masternode.properties['ssh_user']
    fabenv['password']=masternode.properties['ssh_password']
    fabenv['key_filename']=masternode.properties['ssh_keyfilename']
  env.update(fabenv)
Example no. 12
def setfabenv(kwargs):
    fabenv = {}
    master = get_ip(kwargs['master'])
    masternode = ctx.get_node(kwargs['master'])
    if (masternode.type == 'cloudify.nodes.DeploymentProxy'):
        #grab proper ip, assumes relationship has copied properties to instance
        fabenv['host_string'] = kwargs['ssh_user'] + '@' + master
        fabenv['port'] = kwargs.get('ssh_port', '22')
        fabenv['user'] = kwargs['ssh_user']
        fabenv['key_filename'] = kwargs['ssh_keyfilename']
    else:
        #requires ssh info be defined on master node
        masternode = ctx.get_node(kwargs['master'])
        fabenv['host_string'] = masternode.properties[
            'ssh_user'] + '@' + masternode.properties['ip']
        fabenv['port'] = masternode.properties['ssh_port']
        fabenv['user'] = masternode.properties['ssh_user']
        fabenv['password'] = masternode.properties['ssh_password']
        fabenv['key_filename'] = masternode.properties['ssh_keyfilename']
    env.update(fabenv)
Example no. 13
def setfabenv(kwargs):
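  # build the Fabric env from SSH settings stored as properties on the master node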
  master=get_ip(kwargs['master'])
  masternode=ctx.get_node(kwargs['master'])
  url='http://'+master
  fabenv={}
  fabenv['user']=masternode.properties['ssh_username']
  fabenv['password']=masternode.properties['ssh_password']
  fabenv['key_filename']=masternode.properties['ssh_keyfilename']
  fabenv['host_string']=masternode.properties['ssh_username']+'@'+masternode.properties['ip']
  fabenv['port']=masternode.properties['ssh_port']
  env.update(fabenv)
Example no. 14
def get_instances_of_nodes(node_id=None, node_type=None, deployment_id=None):
    """ Get instances of nodes either by node ID or node type.

    :param node_id: The node ID to filter.
    :param node_type: The node type to filter.
    :param deployment_id: The ID of a deployment node.
    :return list: A list of node instances.
    """
    if node_id:
        controller_node = wtx.get_node(node_id)
        return controller_node.instances
    elif node_type:
        return get_node_instances_by_type(node_type=node_type,
                                          deployment_id=deployment_id)
    else:
        raise NonRecoverableError('No node_id and no node_type provided.')
Example no. 15
def create_file(file_name,input_str,input_type,**kwargs):
    ctx.logger.info("In create_file {}".format(file_name))
    ctx.logger.info("input_str {}".format(input_str))
    ctx.logger.info("input_type {}".format(input_type))
    if input_type == 'name':
        node = ctx.get_node(input_str)
        ctx.logger.info( "*** create_file is about to be performed on node.id {}".format(node.id))
        for instance in node.instances:
            instance.execute_operation("file.interfaces.action.createNewFile",kwargs={'process': {'args': [file_name]}})
    else:
        for node in ctx.nodes:
            if input_str == node.type:
                ctx.logger.info( "*** create_file is about to be performed on node.id {}".format(node.id))
                for instance in node.instances:
                    instance.execute_operation("file.interfaces.action.createNewFile",kwargs={'process': {'args': [file_name]}})

    ctx.logger.info("End of create_file")
Example no. 16
def restart_vms(node_id, node_instance_id=None, my_input=None, **kwargs):
    ctx.logger.info("restart_vms node_id {0}".format(node_id))
    node = ctx.get_node(node_id)
    if my_input is None:
        ctx.logger.info("my_input is None. Setting it to NA")
        my_input = "N/A"

    if node_instance_id is None:
        ctx.logger.info("node_instance_id is None. Will run on all the instances of {0}".format(node_id))

    for instance in node.instances:
        instance_str = str(instance.id)
        ctx.logger.info("Checking instance.id : {0} of node {1}".format(instance_str, str(node_id)))
        if node_instance_id is None or node_instance_id == instance_str:
            ctx.logger.info("Running execute_operation(utils.ops.restart_vm_op) on {0}".format(instance_str))
            instance.execute_operation("utils.ops.restart_vm_op", kwargs={'process': {'args': [my_input]}})
            ctx.logger.info("Ran execute_operation(utils.ops.restart_vm_op) on {0}".format(instance_str))
    ctx.logger.info("End of restart_vms")
Example no. 17
def run_security_update(security_update_number,input_str,input_type,**kwargs):
    ctx.logger.info("In run_security_update {}".format(security_update_number))
    ctx.logger.info("input_str {}".format(input_str))
    ctx.logger.info("input_type {}".format(input_type))
    if input_type == 'name':
        node = ctx.get_node(input_str)
        ctx.logger.info( "*** run_security_update is about to be performed on node.id {}".format(node.id))
        for instance in node.instances:
            instance.execute_operation("file.interfaces.action.runSecurityUpdate",kwargs={'process': {'args': [security_update_number]}})
    else:
        for node in ctx.nodes:
            if input_str == node.type:
                ctx.logger.info( "*** run_security_update is about to be performed on node.id {}".format(node.id))
                for instance in node.instances:
                    instance.execute_operation("file.interfaces.action.runSecurityUpdate",kwargs={'process': {'args': [security_update_number]}})


    ctx.logger.info("End of run_security_update")
Example no. 18
def jenkins_run_cmd(cmd_name,
                    arg_value="",
                    key1_name="",
                    key1_value="",
                    **kwargs):
    ctx.logger.info("In jenkins_run_cmd {}".format(cmd_name))
    node = ctx.get_node('jenkins_app')
    ctx.logger.info("jenkins_run_cmd is about to exec on node.id {}".format(
        node.id))

    for instance in node.instances:
        instance.execute_operation(
            "jenkins.interfaces.action.jenkins_cmd",
            kwargs={
                'process': {
                    'args': [cmd_name, arg_value, key1_name, key1_value]
                }
            })
    ctx.logger.info("End of jenkins_run_cmd")
Example no. 19
def kube_scale(**kwargs):
  setfabenv(kwargs)

  #get resource name
  nodename=kwargs['name']
  node=ctx.get_node(nodename)
  name=node.properties['name']
  amount=0

  #if the request is an increment, get current value
  if(kwargs['amount'][0]=='+' or kwargs['amount'][0]=='-'):
    output=run("./kubectl -s http://localhost:8080 get rc --no-headers {}".format(name))
    curinstances=int(output.stdout.split()[4])
    inc=int(kwargs['amount'])
    amount=curinstances+inc
  else:
    amount=int(kwargs['amount'])

  run("./kubectl -s http://localhost:8080 scale --replicas={} rc {}".format(amount,name))
Example no. 20
def execute_operation(op, rel, node, kwargs, **_):
    node = workflow_ctx.get_node(node)
    instance = next(node.instances)
    kwargs['rel'] = rel
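    # run the operation on the relationship source, the relationship target, or the instance itself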
    if rel == 'source':
        try:
            relationship = next(instance.relationships)
            relationship.execute_source_operation(op, kwargs=kwargs)
        except StopIteration:
            return
    elif rel == 'target':
        try:
            relationship = next(instance.relationships)
            relationship.execute_target_operation(op, kwargs=kwargs)
        except StopIteration:
            return
    elif rel == '':
        instance.execute_operation(op, kwargs=kwargs)
    else:
        raise RuntimeError('not handled: {0}'.format(rel))
Example no. 21
def graph_scale_down_workflow(delta):
    """Scale down the kubernetes cluster.

    A maximum number of `delta` nodes will be removed from the cluster.

    """
    # Set the workflow to be in graph mode.
    graph = workctx.graph_mode()

    # Get a maximum of `delta` number of workers.
    node = workctx.get_node('kube_worker')
    instances = [instance for instance in node.instances][:delta]

    # Setup events to denote the beginning and end of tasks.
    start_events, done_events = {}, {}

    for i, instance in enumerate(instances):
        start_events[i] = instance.send_event('Removing node cluster')
        done_events[i] = instance.send_event('Node removed from cluster')

    # Create `delta` number of TaskSequence objects. That way we are able to
    # control the sequence of events and the dependencies amongst tasks. One
    # graph sequence corresponds to node being removed from the cluster.
    for i, instance in enumerate(instances):
        sequence = graph.sequence()
        sequence.add(
            start_events[i],
            instance.execute_operation(
                operation='cloudify.interfaces.lifecycle.stop',
            ),
            instance.execute_operation(
                operation='cloudify.interfaces.lifecycle.delete',
            ),
            done_events[i],
        )

    # Start execution.
    return graph.execute()
Example no. 22
def graph_scale_down_workflow(delta):
    """Scale down the kubernetes cluster.

    A maximum number of `delta` nodes will be removed from the cluster.

    """
    # Set the workflow to be in graph mode.
    graph = workctx.graph_mode()

    # Get a maximum of `delta` number of workers.
    node = workctx.get_node('kube_worker')
    instances = [instance for instance in node.instances][:delta]

    # Setup events to denote the beginning and end of tasks.
    start_events, done_events = {}, {}

    for i, instance in enumerate(instances):
        start_events[i] = instance.send_event('Removing node cluster')
        done_events[i] = instance.send_event('Node removed from cluster')

    # Create `delta` number of TaskSequence objects. That way we are able to
    # control the sequence of events and the dependencies amongst tasks. One
    # graph sequence corresponds to node being removed from the cluster.
    for i, instance in enumerate(instances):
        sequence = graph.sequence()
        sequence.add(
            start_events[i],
            instance.execute_operation(
                operation='cloudify.interfaces.lifecycle.stop'),
            instance.execute_operation(
                operation='cloudify.interfaces.lifecycle.delete'),
            done_events[i],
        )

    # Start execution.
    return graph.execute()
Example no. 23
def scale_cluster_down(quantity):
    master = workctx.get_node('kube_master')
    # Get node directly from local-storage in order to have access to all of
    # its runtime_properties
    master_node = LocalStorage.get('kube_master')
    # Public IP of the Kubernetes Master used to remove nodes from the cluster
    master_ip = master_node.runtime_properties['server_ip']
    username = master_node.runtime_properties['auth_user']
    password = master_node.runtime_properties['auth_pass']
    # TODO deprecate this! /
    mist_client = connection.MistConnectionClient(properties=master.properties)
    cloud = mist_client.cloud
    # / deprecate

    worker_name = inputs.get('worker_name')
    if not worker_name:
        raise NonRecoverableError('Kubernetes Worker\'s name is missing')

    machines = cloud.machines(search=worker_name)
    if not machines:
        workctx.logger.warn(
            'Cannot find node \'%s\'. Already removed? '
            'Exiting...', worker_name)
        return

    workctx.logger.info('Terminating %d Kubernetes Worker(s)...',
                        len(machines))
    counter = 0

    # Get all nodes via the kubernetes API. This will give us access to all
    # nodes' metadata. If the master node does not expose a publicly accessible
    # IP address, then the connection will fail. In that case, we won't be
    # able to retrieve and verify the list of nodes in order to remove them
    # from the cluster.
    try:
        url = 'https://%s:%s@%s' % (username, password, master_ip)
        nodes = requests.get('%s/api/v1/nodes' % url, verify=False)
    except Exception as exc:
        if netaddr.IPAddress(master_ip).is_private():
            raise NonRecoverableError(
                'Cannot connect to the kubernetes master to automatically '
                'remove nodes from the cluster. It seems like the kubernetes '
                'master listens at a private IP address. You can manually '
                'remove nodes by destroying them or by simply disassociating '
                'them from the kubernetes cluster. For instance, the current '
                'node can be removed from the cluster by issuing an HTTP '
                'DELETE request at https://%s:%s@%s/api/v1/nodes/%s from the '
                'same network' % (username, password, master_ip, worker_name))
        raise NonRecoverableError('Connection to master failed: %s' % exc)
    if not nodes.ok:
        raise NonRecoverableError('Got %s: %s' % (nodes.status_code, nodes.text))
    nodes = nodes.json()

    # If any of the machines specified, match a kubernetes node, then
    # we attempt to remove the node from the cluster and destroy it.
    for m in machines:
        for node in nodes['items']:
            labels = node['metadata']['labels']
            if labels['kubernetes.io/hostname'] == m.name:
                if 'node-role.kubernetes.io/master' in labels:
                    raise NonRecoverableError('Cannot remove master')
                break
        else:
            workctx.logger.error('%s does not match a kubernetes node', m)
            continue

        workctx.logger.info('Removing %s from cluster', m)
        api = node['metadata']['selfLink']
        resp = requests.delete('%s%s' % (url, api), verify=False)
        if not resp.ok:
            workctx.logger.error('Bad response from kubernetes: %s', resp.text)

        workctx.logger.info('Destroying machine')
        m.destroy()

        # FIXME Why?
        counter += 1
        if counter == quantity:
            break

    workctx.logger.info('Downscaling the kubernetes cluster completed!')
Example no. 24
from cloudify.workflows import ctx

instance = next(ctx.get_node('node').instances)
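# run test.op2, wait for its result with get(), and pass it as an input to test.op1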
op2_result = instance.execute_operation('test.op2').get()
instance.execute_operation('test.op1', kwargs={'property': op2_result})
Example no. 25
def a4c_scale(ctx, node_id, delta, scale_compute, **kwargs):
    scaled_node = ctx.get_node(node_id)
    if not scaled_node:
        raise ValueError("Node {0} doesn't exist".format(node_id))
    if not is_host_node(scaled_node):
        raise ValueError(
            "Node {0} is not a host. This workflow can only scale hosts".
            format(node_id))
    if delta == 0:
        ctx.logger.info('delta parameter is 0, so no scaling will take place.')
        return

    curr_num_instances = scaled_node.number_of_instances
    planned_num_instances = curr_num_instances + delta
    if planned_num_instances < 1:
        raise ValueError('Provided delta: {0} is illegal. Current number of '
                         'instances of node {1} is {2}'.format(
                             delta, node_id, curr_num_instances))

    modification = ctx.deployment.start_modification(
        {scaled_node.id: {
            'instances': planned_num_instances
        }})
    ctx.logger.info(
        'Deployment modification started. [modification_id={0} : {1}]'.format(
            modification.id, dir(modification)))
    try:
        if delta > 0:
            ctx.logger.info('Scaling host {0} adding {1} instances'.format(
                node_id, delta))
            added_and_related = _get_all_nodes(modification.added)
            added = _get_all_modified_node_instances(added_and_related,
                                                     'added')
            graph = ctx.graph_mode()
            ctx.internal.send_workflow_event(
                event_type='a4c_workflow_started',
                message=build_pre_event(WfStartEvent('scale', 'install')))
            custom_context = CustomContext(ctx, added, added_and_related)
            install_host(ctx, graph, custom_context, node_id)
            try:
                graph.execute()
            except:
                ctx.logger.error(
                    'Scale failed. Uninstalling node {0}'.format(node_id))
                graph = ctx.internal.task_graph
                for task in graph.tasks_iter():
                    graph.remove_task(task)
                try:
                    custom_context = CustomContext(ctx, added,
                                                   added_and_related)
                    uninstall_host(ctx, graph, custom_context, node_id)
                    graph.execute()
                except:
                    ctx.logger.error(
                        'Node {0} uninstallation following scale failure has failed'
                        .format(node_id))
                raise
        else:
            ctx.logger.info('Unscaling host {0} removing {1} instances'.format(
                node_id, delta))
            removed_and_related = _get_all_nodes(modification.removed)
            removed = _get_all_modified_node_instances(removed_and_related,
                                                       'removed')
            graph = ctx.graph_mode()
            ctx.internal.send_workflow_event(
                event_type='a4c_workflow_started',
                message=build_pre_event(WfStartEvent('scale', 'uninstall')))
            custom_context = CustomContext(ctx, removed, removed_and_related)
            uninstall_host(ctx, graph, custom_context, node_id)
            try:
                graph.execute()
            except:
                ctx.logger.error('Unscale failed.')
                raise
    except:
        ctx.logger.warn(
            'Rolling back deployment modification. [modification_id={0}]'.
            format(modification.id))
        try:
            modification.rollback()
        except:
            ctx.logger.warn('Deployment modification rollback failed. The '
                            'deployment model is most likely in some corrupted'
                            ' state.'
                            '[modification_id={0}]'.format(modification.id))
            raise
        raise
    else:
        try:
            modification.finish()
        except:
            ctx.logger.warn('Deployment modification finish failed. The '
                            'deployment model is most likely in some corrupted'
                            ' state.'
                            '[modification_id={0}]'.format(modification.id))
            raise
Example no. 26
from cloudify.workflows import ctx


instance = next(ctx.get_node('node').instances)
op2_result = instance.execute_operation('test.op2').get()
instance.execute_operation('test.op1', kwargs={'property': op2_result})
Example no. 27
def run_multi(**_):
    node = workflow_ctx.get_node('node1')
    for instance in node.instances:
        instance.execute_operation('test.op')
Example no. 28
def graph_scale_up_workflow(delta, worker_data_list):
    """Scale up the kubernetes cluster.

    This method implements the scale up workflow using the Graph Framework.

    Scaling is based on the `delta` input, which must be greater than 0 for
    the workflow to run.

    """
    # Set the workflow to be in graph mode.
    graph = workctx.graph_mode()

    # Get the instance for which to add an execute operation task to the graph.
    node = workctx.get_node('kube_worker')
    instance = [instance for instance in node.instances][0]

    # Setup events to denote the beginning and end of tasks. The events will be
    # also used to control dependencies amongst tasks.
    start_events, done_events = {}, {}

    for i in range(delta):
        start_events[i] = instance.send_event('Adding node to cluster')
        done_events[i] = instance.send_event('Node added to cluster')

    # Prepare the operations' kwargs.
    operation_kwargs_list = []

    for worker_data in worker_data_list:
        if worker_data.get('machine_id'):
            operation_kwargs_list.append(
                {
                    'cloud_id': worker_data.get('cloud_id'),
                    'machine_id': worker_data['machine_id'],
                }
            )
        else:
            operation_kwargs_list.append(
                {
                    'key_id': worker_data.get('key_id', ''),
                    'size_id': worker_data.get('size_id', ''),
                    'image_id': worker_data.get('image_id', ''),
                    'cloud_id': worker_data.get('cloud_id', ''),
                    'machine_id': '',
                    'networks': worker_data.get('networks', []),
                    'location_id': worker_data.get('location_id', ''),
                }
            )

    # Create `delta` number of TaskSequence objects. That way we are able to
    # control the sequence of events and the dependencies amongst tasks. One
    # graph sequence corresponds to a new node added to the cluster.
    for i in range(delta):
        sequence = graph.sequence()
        sequence.add(
            start_events[i],
            instance.execute_operation(
                operation='cloudify.interfaces.lifecycle.clone',
            ),
            instance.execute_operation(
                operation='cloudify.interfaces.lifecycle.create',
                kwargs=operation_kwargs_list[i],
            ),
            instance.execute_operation(
                operation='cloudify.interfaces.lifecycle.configure',
            ),
            done_events[i],
        )

    # Now, we use the events to control the tasks' dependencies, ensuring that
    # tasks are executed in the correct order. We aim to create dependencies
    # between a sequence's last event and the next sequence's initial event.
    # That way, we ensure that sequences are executed sequentially, and not in
    # parallel. This is required, since the cloudify.interfaces.lifecycle.clone
    # operation modifies the node instances in local-storage and we want to
    # avoid having multiple tasks messing with the same files at the same time.
    for i in range(delta - 1):
        graph.add_dependency(start_events[i + 1], done_events[i])

    # Start execution.
    return graph.execute()
Example no. 29
def graph_scale_workflow(delta):
    """Scale up the kubernetes cluster.

    This method implements the scale up workflow using the Graph Framework.

    Scaling is based on the `delta` input, which must be greater than 0 for
    the workflow to run.

    """
    # Set the workflow to be in graph mode.
    graph = workctx.graph_mode()

    # Get the instance for which to add an execute operation task to the graph.
    node = workctx.get_node('kube_worker')
    instance = [instance for instance in node.instances][0]

    # Setup events to denote the beginning and end of tasks. The events will be
    # also used to control dependencies amongst tasks.
    start_events, done_events = {}, {}

    for i in range(delta):
        start_events[i] = instance.send_event('Adding node to cluster')
        done_events[i] = instance.send_event('Node added to cluster')

    # Create `delta` number of TaskSequence objects. That way we are able to
    # control the sequence of events and the dependencies amongst tasks. One
    # graph sequence corresponds to a new node added to the cluster.
    for i in range(delta):
        sequence = graph.sequence()
        sequence.add(
            start_events[i],
            instance.execute_operation(
                operation='cloudify.interfaces.lifecycle.clone'),
            instance.execute_operation(
                operation='cloudify.interfaces.lifecycle.create',
                kwargs={
                    'cloud_id': inputs.get('mist_cloud', ''),
                    'image_id': inputs.get('mist_image', ''),
                    'size_id': inputs.get('mist_size', ''),
                    'location_id': inputs.get('mist_location', ''),
                    'networks': inputs.get('mist_networks', []),
                    'key': inputs.get('mist_key', ''),
                },
            ),
            instance.execute_operation(
                operation='cloudify.interfaces.lifecycle.configure'),
            done_events[i],
        )

    # Now, we use the events to control the tasks' dependencies, ensuring that
    # tasks are executed in the correct order. We aim to create dependencies
    # between a sequence's last event and the next sequence's initial event.
    # That way, we ensure that sequences are executed sequentially, and not in
    # parallel. This is required, since the cloudify.interfaces.lifecycle.clone
    # operation modifies the node instances in local-storage and we want to
    # avoid having multiple tasks messing with the same files at the same time.
    for i in range(delta - 1):
        graph.add_dependency(start_events[i + 1], done_events[i])

    # Start execution.
    return graph.execute()
from cloudify.workflows import ctx
from cloudify.workflows import parameters as inputs


consumer = ctx.get_node('consumer')
consumer = [instance for instance in consumer.instances][0]
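# run the remove lifecycle operation on the first consumer instance, passing the workflow's clouds parameter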
consumer.execute_operation(
    'cloudify.interfaces.lifecycle.remove',
    kwargs={'clouds': inputs.clouds}
)
def a4c_scale(ctx, node_id, delta, scale_compute, **kwargs):
    scaled_node = ctx.get_node(node_id)
    if not scaled_node:
        raise ValueError("Node {0} doesn't exist".format(node_id))
    if not is_host_node(scaled_node):
        raise ValueError("Node {0} is not a host. This workflow can only scale hosts".format(node_id))
    if delta == 0:
        ctx.logger.info('delta parameter is 0, so no scaling will take place.')
        return

    curr_num_instances = scaled_node.number_of_instances
    planned_num_instances = curr_num_instances + delta
    if planned_num_instances < 1:
        raise ValueError('Provided delta: {0} is illegal. Current number of '
                         'instances of node {1} is {2}'
                         .format(delta, node_id, curr_num_instances))

    modification = ctx.deployment.start_modification({
        scaled_node.id: {
            'instances': planned_num_instances
        }
    })
    ctx.logger.info(
        'Deployment modification started. [modification_id={0} : {1}]'.format(modification.id, dir(modification)))
    try:
        if delta > 0:
            ctx.logger.info('Scaling host {0} adding {1} instances'.format(node_id, delta))
            added_and_related = _get_all_nodes(modification.added)
            added = _get_all_modified_node_instances(added_and_related, 'added')
            graph = ctx.graph_mode()
            ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
                                             message=build_pre_event(WfStartEvent('scale', 'install')))
            custom_context = CustomContext(ctx, added, added_and_related)
            install_host(ctx, graph, custom_context, node_id)
            try:
                graph.execute()
            except:
                ctx.logger.error('Scale failed. Uninstalling node {0}'.format(node_id))
                graph = ctx.internal.task_graph
                for task in graph.tasks_iter():
                    graph.remove_task(task)
                try:
                    custom_context = CustomContext(ctx, added, added_and_related)
                    uninstall_host(ctx, graph, custom_context, node_id)
                    graph.execute()
                except:
                    ctx.logger.error('Node {0} uninstallation following scale failure has failed'.format(node_id))
                raise
        else:
            ctx.logger.info('Unscaling host {0} removing {1} instances'.format(node_id, delta))
            removed_and_related = _get_all_nodes(modification.removed)
            removed = _get_all_modified_node_instances(removed_and_related, 'removed')
            graph = ctx.graph_mode()
            ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
                                             message=build_pre_event(WfStartEvent('scale', 'uninstall')))
            custom_context = CustomContext(ctx, removed, removed_and_related)
            uninstall_host(ctx, graph, custom_context, node_id)
            try:
                graph.execute()
            except:
                ctx.logger.error('Unscale failed.')
                raise
    except:
        ctx.logger.warn('Rolling back deployment modification. [modification_id={0}]'.format(modification.id))
        try:
            modification.rollback()
        except:
            ctx.logger.warn('Deployment modification rollback failed. The '
                            'deployment model is most likely in some corrupted'
                            ' state.'
                            '[modification_id={0}]'.format(modification.id))
            raise
        raise
    else:
        try:
            modification.finish()
        except:
            ctx.logger.warn('Deployment modification finish failed. The '
                            'deployment model is most likely in some corrupted'
                            ' state.'
                            '[modification_id={0}]'.format(modification.id))
            raise
def get_ip(master):
  if(ctx.local):
    return ctx.get_node(master).properties['ip']
  else:
    raise NotImplementedError('not implemented')  # need to get default instance in cloud case
def run_multi(**_):
    node = workflow_ctx.get_node('node1')
    for instance in node.instances:
        instance.execute_operation('test.op')