def get_deployment_dir(deployment_name=None, deployment_id=None):
    """ Get the deployment directory.
    :param deployment_name: The deployment ID or name.
    :type deployment_name: str
    :return: Return wrapper_inner.
    """
    deployment_name = deployment_name or deployment_id  # backward compat.
    deployments_old_dir = os.path.join('/opt',
                                       'mgmtworker', 'work', 'deployments',
                                       get_tenant_name(), deployment_name)

    deployments_new_dir = os.path.join('/opt', 'manager',
                                       'resources', 'deployments',
                                       get_tenant_name(), deployment_name)

    if os.path.isdir(deployments_new_dir):
        return deployments_new_dir
    elif os.path.isdir(deployments_old_dir):
        return deployments_old_dir
    else:
        deployment = get_deployment(deployment_name)
        if deployment:
            deployments_id_new_dir = os.path.join('/opt', 'manager',
                                                  'resources', 'deployments',
                                                  get_tenant_name(),
                                                  deployment.id)
            if os.path.isdir(deployments_id_new_dir):
                return deployments_id_new_dir

        raise SDKNonRecoverableError("No deployment directory found!")
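
A minimal usage sketch (the file name and the use of ctx here are illustrative, not taken from the source): the helper would typically be called from a plugin operation to locate the deployment's working directory on the manager.

# Illustrative only: resolve the directory for the current deployment and
# build a path to a file assumed to live inside it.
deployment_dir = get_deployment_dir(deployment_id=ctx.deployment.id)
config_path = os.path.join(deployment_dir, 'config.yaml')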
Example #2
def _cfy_agent_attributes_no_defaults(cloudify_agent):
    if not cloudify_agent.get('process_management'):
        cloudify_agent['process_management'] = {}

    if not cloudify_agent['process_management'].get('name'):
        # user did not specify process management configuration, choose the
        # default one according to os type.
        if cloudify_agent['windows']:
            name = 'nssm'
        else:
            name = 'init.d'
        cloudify_agent['process_management']['name'] = name

    if not cloudify_agent.get('name'):
        if cloudify_agent['local']:
            workflows_worker = cloudify_agent.get('workflows_worker', False)
            suffix = '_workflows' if workflows_worker else ''
            name = '{0}{1}'.format(ctx.deployment.id, suffix)
        else:
            name = ctx.instance.id
        cloudify_agent['name'] = name

    service_name = cloudify_agent.get('service_name')
    if service_name:
        # service_name takes precedence over name (which is deprecated)
        cloudify_agent['name'] = service_name

    if not cloudify_agent.get('queue'):
        # by default, queue of the agent is the same as the name
        cloudify_agent['queue'] = cloudify_agent['name']

    if not cloudify_agent.get('rest_host'):
        cloudify_agent['rest_host'] = \
            cloudify_utils.get_manager_rest_service_host()

    if not cloudify_agent.get('rest_port'):
        cloudify_agent['rest_port'] = \
            cloudify_utils.get_manager_rest_service_port()

    if not cloudify_agent.get('rest_token'):
        cloudify_agent['rest_token'] = \
            cloudify_utils.get_rest_token()

    if not cloudify_agent.get('rest_tenant'):
        cloudify_agent['rest_tenant'] = \
            cloudify_utils.get_tenant_name()

    if not cloudify_agent.get('bypass_maintenance'):
        cloudify_agent['bypass_maintenance'] = \
            cloudify_utils.get_is_bypass_maintenance()
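
A hedged sketch of how the defaults are filled in, assuming an operation context (ctx) is available; the input dict below is illustrative:

# Illustrative only: the caller provides just the platform flags.
cloudify_agent = {'windows': False, 'local': False}
_cfy_agent_attributes_no_defaults(cloudify_agent)
# The dict now also carries 'process_management', 'name', 'queue',
# 'rest_host', 'rest_port', 'rest_token', 'rest_tenant' and the
# bypass-maintenance flag, populated from the context and manager utilities.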
Example #3
def get_dep_contexts(version):
    deps = {}
    tenants = [get_tenant_name()] if version < snapshot_constants.V_4_0_0 \
        else get_tenants_list()
    for tenant_name in tenants:
        # Temporarily assign the context a different tenant name so that
        # we can retrieve that tenant's deployment contexts
        with internal_utils._change_tenant(ctx, tenant_name):
            # We have to zero this out each time or the cached version for
            # the previous tenant will be used
            ctx._dep_contexts = None

            # Get deployment contexts for this tenant
            deps[tenant_name] = ctx.deployments_contexts
    return deps.items()
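
A short usage sketch (manager_version, log and the loop body are illustrative): the function returns (tenant name, deployment contexts) pairs.

# Illustrative only: iterate the per-tenant deployment contexts.
for tenant_name, dep_contexts in get_dep_contexts(manager_version):
    log('tenant {0}: {1} deployment context(s)'.format(
        tenant_name, len(dep_contexts)))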
def check_liveness(nodes_to_monitor, depl_id):
    c = CloudifyClient(host=utils.get_manager_ip(),
                       port=utils.get_manager_rest_service_port(),
                       protocol='https',
                       cert=utils.get_local_rest_certificate(),
                       token=utils.get_rest_token(),
                       tenant=utils.get_tenant_name())

    c_influx = InfluxDBClient(host='localhost', port=8086, database='cloudify')
    log('nodes_to_monitor: {0}'.format(nodes_to_monitor))

    # Compare influx data (monitoring) to the Cloudify desired state.
    for node_name in nodes_to_monitor:
        instances = c.node_instances.list(depl_id, node_name)
        for instance in instances:
            q_string = ('SELECT MEAN(value) FROM /' + depl_id + '\\.' +
                        node_name + '\\.' + instance.id +
                        '\\.cpu_total_system/ GROUP BY time(10s) '
                        'WHERE time > now() - 40s')
            log('query string is {0}'.format(q_string))
            try:
                result = c_influx.query(q_string)
                log('result is {0}'.format(result))
                if not result:
                    executions = c.executions.list(depl_id)
                    has_pending_execution = False
                    if executions and len(executions) > 0:
                        for execution in executions:
                            # log("Execution {0} : {1}".format(execution.id, execution.status))
                            if execution.status not in execution.END_STATES:
                                has_pending_execution = True
                    if not has_pending_execution:
                        log('Setting state to error for instance {0} and its children'.format(instance.id))
                        update_nodes_tree_state(c, depl_id, instance, 'error')
                        params = {'node_instance_id': instance.id}
                        log('Calling auto-healing workflow for container instance {0}'.format(instance.id))
                        c.executions.start(depl_id, 'a4c_heal', params)
                    else:
                        log('Pending executions on the deployment... waiting for them to end before calling the heal workflow')
            except InfluxDBClientError as ee:
                log('InfluxDBClientError {0}'.format(str(ee)), level='ERROR')
                log('instance id is {0}'.format(instance), level='ERROR')
            except Exception as e:
                log(str(e), level='ERROR')
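
A hypothetical invocation, assuming the monitored node names and the deployment id arrive as script inputs behaving like a dict (the input keys are illustrative):

# Illustrative only: read the parameters and run one liveness pass.
nodes_to_monitor = inputs['nodes_to_monitor']
check_liveness(nodes_to_monitor, inputs['deployment_id'])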
Example #6
def get_rest_client(tenant=None, api_token=None):
    """
    :param tenant: optional tenant name to connect as
    :param api_token: optional api_token to authenticate with (instead of
            using REST token)
    :returns: A REST client configured to connect to the manager in context
    :rtype: cloudify_rest_client.CloudifyClient
    """
    cluster_settings = get_cluster_settings()
    if cluster_settings:
        client_class = CloudifyClusterClient
    else:
        client_class = CloudifyClient

    if not tenant:
        tenant = utils.get_tenant_name()

    # Handle maintenance mode
    headers = {}
    if utils.get_is_bypass_maintenance():
        headers['X-BYPASS-MAINTENANCE'] = 'True'

    # If api_token or execution_token was provided no need to use REST token
    token = None
    execution_token = utils.get_execution_token()
    if execution_token:
        headers[constants.CLOUDIFY_EXECUTION_TOKEN_HEADER] = execution_token
    elif api_token:
        headers[constants.CLOUDIFY_API_AUTH_TOKEN_HEADER] = api_token
    else:
        token = utils.get_rest_token()

    return client_class(headers=headers,
                        host=utils.get_manager_rest_service_host(),
                        port=utils.get_manager_rest_service_port(),
                        tenant=tenant,
                        token=token,
                        protocol=constants.SECURED_PROTOCOL,
                        cert=utils.get_local_rest_certificate(),
                        kerberos_env=utils.get_kerberos_indication(
                            os.environ.get(constants.KERBEROS_ENV_KEY)))
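
A brief usage sketch; deployments.list() is a standard CloudifyClient call, the rest is illustrative:

# Illustrative only: connect as the current tenant and list deployments.
rest_client = get_rest_client()
for deployment in rest_client.deployments.list():
    print(deployment.id)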

from cloudify import utils
from cloudify_rest_client import CloudifyClient
from cloudify.state import ctx_parameters as inputs

import os
import re

client = CloudifyClient(host=utils.get_manager_ip(),
                        port=utils.get_manager_rest_service_port(),
                        protocol='https',
                        cert=utils.get_local_rest_certificate(),
                        token=utils.get_rest_token(),
                        tenant=utils.get_tenant_name())

def convert_env_value_to_string(envDict):
    # Cast every key and value to a string in place. Iterate over a copy of
    # the keys because the dict is mutated (popped) while looping.
    for key in list(envDict):
        envDict[str(key)] = str(envDict.pop(key))
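
# Usage example (illustrative, not from the original module): mixed-type
# keys and values are coerced in place.
example_env = {1: 2, 'PATH': '/usr/bin'}
convert_env_value_to_string(example_env)
# example_env is now {'1': '2', 'PATH': '/usr/bin'}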


def parse_output(output):
    # by convention, the last output is the result of the operation
    last_output = None
    outputs = {}
    pattern = re.compile(r'EXPECTED_OUTPUT_(\w+)=(.*)')
    for line in output.splitlines():
        match = pattern.match(line)
        if match is None:
            last_output = line