def set_default_values(self):
    """Populate this agent-config dict with defaults for missing keys.

    Resolution order matters: the name must exist before 'queue' can
    default to it, and the network must be set before IPs/certs are
    resolved from it.
    """
    self._set_name()
    self.setdefault('network', constants.DEFAULT_NETWORK_NAME)
    self._set_ips_and_certs()
    self._set_tenant()
    # The networks dict has served its purpose; drop it.
    if 'networks' in self:
        self.pop('networks')
    self.setdefault('node_instance_id', ctx.instance.id)
    self.setdefault('queue', self['name'])
    self.setdefault('rest_port',
                    cloudify_utils.get_manager_rest_service_port())
    self.setdefault('bypass_maintenance',
                    cloudify_utils.get_is_bypass_maintenance())
    # Static fallbacks that need no manager lookup.
    for key, fallback in (
        ('min_workers', defaults.MIN_WORKERS),
        ('max_workers', defaults.MAX_WORKERS),
        ('env', {}),
        ('fabric_env', {}),
        ('system_python', 'python'),
        ('heartbeat', None),
    ):
        self.setdefault(key, fallback)
    self.setdefault('version', agent_utils.get_agent_version())
    for key, fallback in (
        ('log_level', defaults.LOG_LEVEL),
        ('log_max_bytes', defaults.LOG_FILE_SIZE),
        ('log_max_history', defaults.LOG_BACKUPS),
    ):
        self.setdefault(key, fallback)
    # detach agents dont use sudo, so they don't need disable-requiretty
    process_management_name = self.get('process_management', {}).get('name')
    self.setdefault('disable_requiretty',
                    process_management_name != 'detach')
def set_default_values(self):
    """Fill in defaults for any agent-config keys the user did not set.

    The name must be resolved before 'queue' (which defaults to it), and
    the network before IPs and the broker certificate are derived.
    """
    self._set_name()
    self.setdefault('network', constants.DEFAULT_NETWORK_NAME)
    self._set_ips()
    self._set_broker_cert()
    # Remove the networks dict as it's no longer needed
    if 'networks' in self:
        self.pop('networks')
    self.setdefault('node_instance_id', ctx.instance.id)
    self.setdefault('queue', self['name'])
    self.setdefault('rest_port',
                    cloudify_utils.get_manager_rest_service_port())
    self.setdefault('bypass_maintenance',
                    cloudify_utils.get_is_bypass_maintenance())
    # Static fallbacks (no manager lookup required).
    for key, fallback in (
        ('min_workers', defaults.MIN_WORKERS),
        ('max_workers', defaults.MAX_WORKERS),
        ('disable_requiretty', True),
        ('env', {}),
        ('fabric_env', {}),
        ('system_python', 'python'),
        ('heartbeat', None),
    ):
        self.setdefault(key, fallback)
    self.setdefault('version', agent_utils.get_agent_version())
    for key, fallback in (
        ('log_level', defaults.LOG_LEVEL),
        ('log_max_bytes', defaults.LOG_FILE_SIZE),
        ('log_max_history', defaults.LOG_BACKUPS),
    ):
        self.setdefault(key, fallback)
def _cfy_agent_attributes_no_defaults(cloudify_agent): if not cloudify_agent.get('process_management'): cloudify_agent['process_management'] = {} if not cloudify_agent['process_management'].get('name'): # user did not specify process management configuration, choose the # default one according to os type. if cloudify_agent['windows']: name = 'nssm' else: name = 'init.d' cloudify_agent['process_management']['name'] = name if not cloudify_agent.get('name'): if cloudify_agent['local']: workflows_worker = cloudify_agent.get('workflows_worker', False) suffix = '_workflows' if workflows_worker else '' name = '{0}{1}'.format(ctx.deployment.id, suffix) else: name = ctx.instance.id cloudify_agent['name'] = name service_name = cloudify_agent.get('service_name') if service_name: # service_name takes precedence over name (which is deprecated) cloudify_agent['name'] = service_name if not cloudify_agent.get('queue'): # by default, queue of the agent is the same as the name cloudify_agent['queue'] = cloudify_agent['name'] if not cloudify_agent.get('rest_host'): cloudify_agent['rest_host'] = \ cloudify_utils.get_manager_rest_service_host() if not cloudify_agent.get('rest_port'): cloudify_agent['rest_port'] = \ cloudify_utils.get_manager_rest_service_port() if not cloudify_agent.get('rest_token'): cloudify_agent['rest_token'] = \ cloudify_utils.get_rest_token() if not cloudify_agent.get('rest_tenant'): cloudify_agent['rest_tenant'] = \ cloudify_utils.get_tenant_name() if not cloudify_agent.get('bypass_maintenance'): cloudify_agent['bypass_maintenance_mode'] = \ cloudify_utils.get_is_bypass_maintenance()
def set_default_values(self):
    """Apply fallback values for any unset agent-configuration keys.

    Name and network are resolved first so that 'queue' can default to
    the resolved name.
    """
    self._set_name()
    self._set_network()
    self.setdefault('queue', self['name'])
    self.setdefault('rest_port',
                    cloudify_utils.get_manager_rest_service_port())
    self.setdefault('bypass_maintenance',
                    cloudify_utils.get_is_bypass_maintenance())
    # Static fallbacks (no manager lookup required).
    for key, fallback in (
        ('min_workers', 0),
        ('max_workers', 20),
        ('disable_requiretty', True),
        ('env', {}),
        ('fabric_env', {}),
        ('system_python', 'python'),
        ('heartbeat', None),
    ):
        self.setdefault(key, fallback)
    self.setdefault('version', agent_utils.get_agent_version())
def set_default_values(self):
    """Apply fallback values for agent-config keys the user left unset.

    Process management, name and network are resolved first; 'queue'
    defaults to the resolved name.
    """
    self._set_process_management()
    self._set_name()
    self._set_network()
    self.setdefault('queue', self['name'])
    self.setdefault('rest_token', cloudify_utils.get_rest_token())
    self.setdefault('rest_tenant', cloudify_utils.get_tenant())
    self.setdefault('rest_port',
                    cloudify_utils.get_manager_rest_service_port())
    self.setdefault('bypass_maintenance',
                    cloudify_utils.get_is_bypass_maintenance())
    # Static fallbacks (no manager lookup required).
    for key, fallback in (
        ('min_workers', 0),
        ('max_workers', 5),
        ('disable_requiretty', True),
        ('env', {}),
        ('fabric_env', {}),
        ('system_python', 'python'),
    ):
        self.setdefault(key, fallback)
def create_env_string(cloudify_agent):
    """Build a space-separated ``KEY=value`` environment string for the agent.

    :param cloudify_agent: agent config dict; only 'host' is read here.
    :return: e.g. ``'LOCAL_IP=1.2.3.4 MANAGER_IP=10.0.0.1 ...'`` (pair
        order follows dict iteration order, as before).
    """
    env = {
        LOCAL_IP_KEY: cloudify_agent['host'],
        MANAGER_IP_KEY: get_manager_ip(),
        MANAGER_FILE_SERVER_BLUEPRINTS_ROOT_URL_KEY:
            get_manager_file_server_blueprints_root_url(),
        MANAGER_FILE_SERVER_URL_KEY: get_manager_file_server_url(),
        MANAGER_REST_PORT_KEY: get_manager_rest_service_port(),
    }
    # join() avoids the quadratic string concatenation of the old loop,
    # and .items() (unlike the old iteritems()) works on Python 2 and 3.
    return ' '.join(
        '{0}={1}'.format(key, value) for key, value in env.items())
def check_liveness(nodes_to_monitor,depl_id):
    """Heal node instances that stopped reporting monitoring data.

    For each instance of each node in *nodes_to_monitor*, query InfluxDB
    for its mean system CPU over the last 40 seconds; an empty result
    means the instance stopped reporting.  If no execution is currently
    running on the deployment, mark the instance (and its children) as
    'error' and start the 'a4c_heal' workflow for it.

    :param nodes_to_monitor: iterable of node names to check
    :param depl_id: id of the deployment the nodes belong to
    """
    # REST client authenticated against the local manager over HTTPS.
    c = CloudifyClient(host=utils.get_manager_ip(),
                       port=utils.get_manager_rest_service_port(),
                       protocol='https',
                       cert=utils.get_local_rest_certificate(),
                       token= utils.get_rest_token(),
                       tenant= utils.get_tenant_name())
    # Monitoring data lives in the local 'cloudify' InfluxDB database.
    c_influx = InfluxDBClient(host='localhost', port=8086, database='cloudify')
    log ('nodes_to_monitor: {0}'.format(nodes_to_monitor))
    # compare influx data (monitoring) to cloudify desired state
    for node_name in nodes_to_monitor:
        instances=c.node_instances.list(depl_id,node_name)
        for instance in instances:
            # Series name is '<deployment>.<node>.<instance>.cpu_total_system';
            # dots are escaped because the FROM clause is a regex.
            q_string='SELECT MEAN(value) FROM /' + depl_id + '\.' + \
                node_name + '\.' + instance.id + \
                '\.cpu_total_system/ GROUP BY time(10s) '\
                'WHERE time > now() - 40s'
            log ('query string is {0}'.format(q_string))
            try:
                result=c_influx.query(q_string)
                log ('result is {0}'.format(result))
                if not result:
                    # No recent metrics: heal only if the deployment has
                    # no execution still in progress.
                    executions=c.executions.list(depl_id)
                    has_pending_execution = False
                    if executions and len(executions)>0:
                        for execution in executions:
                            # log("Execution {0} : {1}".format(execution.id, execution.status))
                            if execution.status not in execution.END_STATES:
                                has_pending_execution = True
                    if not has_pending_execution:
                        log ('Setting state to error for instance {0} and its children'.format(instance.id))
                        update_nodes_tree_state(c, depl_id, instance, 'error')
                        params = {'node_instance_id': instance.id}
                        log ('Calling Auto-healing workflow for container instance {0}'.format(instance.id))
                        c.executions.start(depl_id, 'a4c_heal', params)
                    else:
                        log ('pending executions on the deployment...waiting for the end before calling heal workflow...')
            except InfluxDBClientError as ee:
                log ('DBClienterror {0}'.format(str(ee)), level='ERROR')
                log ('instance id is {0}'.format(instance), level='ERROR')
            except Exception as e:
                # Best-effort monitor loop: log and keep checking the
                # remaining instances.
                log (str(e), level='ERROR')
def create_env_string(cloudify_agent):
    """Build a space-separated ``KEY=value`` environment string for the agent.

    :param cloudify_agent: agent config dict; only 'host' is read here.
    :return: e.g. ``'LOCAL_IP=1.2.3.4 MANAGER_IP=10.0.0.1 ...'`` (pair
        order follows dict iteration order, as before).
    """
    env = {
        constants.CELERY_WORK_DIR_PATH_KEY: RUNTIME_AGENT_PATH,
        constants.LOCAL_IP_KEY: cloudify_agent['host'],
        constants.MANAGER_IP_KEY: utils.get_manager_ip(),
        constants.MANAGER_FILE_SERVER_BLUEPRINTS_ROOT_URL_KEY:
            utils.get_manager_file_server_blueprints_root_url(),
        constants.MANAGER_FILE_SERVER_URL_KEY:
            utils.get_manager_file_server_url(),
        constants.MANAGER_REST_PORT_KEY:
            utils.get_manager_rest_service_port(),
    }
    # join() avoids the quadratic string concatenation of the old loop,
    # and .items() (unlike the old iteritems()) works on Python 2 and 3.
    return ' '.join(
        '{0}={1}'.format(key, value) for key, value in env.items())
def get_rest_client(tenant=None, api_token=None):
    """Return a REST client configured for the manager in context.

    :param tenant: optional tenant name to connect as (defaults to the
        tenant of the current context)
    :param api_token: optional api_token to authenticate with (instead
        of using REST token)
    :returns: A REST client configured to connect to the manager in context
    :rtype: cloudify_rest_client.CloudifyClient
    """
    # In a cluster, use the cluster-aware client class.
    client_class = (
        CloudifyClusterClient if get_cluster_settings() else CloudifyClient)

    tenant = tenant or utils.get_tenant_name()

    headers = {}
    # Handle maintenance mode
    if utils.get_is_bypass_maintenance():
        headers['X-BYPASS-MAINTENANCE'] = 'True'

    # Credential precedence: execution token, then explicit api token;
    # only fall back to the REST token when neither is available.
    token = None
    execution_token = utils.get_execution_token()
    if execution_token:
        headers[constants.CLOUDIFY_EXECUTION_TOKEN_HEADER] = execution_token
    elif api_token:
        headers[constants.CLOUDIFY_API_AUTH_TOKEN_HEADER] = api_token
    else:
        token = utils.get_rest_token()

    return client_class(
        headers=headers,
        host=utils.get_manager_rest_service_host(),
        port=utils.get_manager_rest_service_port(),
        tenant=tenant,
        token=token,
        protocol=constants.SECURED_PROTOCOL,
        cert=utils.get_local_rest_certificate(),
        kerberos_env=utils.get_kerberos_indication(
            os.environ.get(constants.KERBEROS_ENV_KEY)))
def _set_ips_and_certs(self):
    """Resolve manager/broker addresses and CA certs for our network."""
    network = self['network']
    managers = ctx.get_managers(network=network)
    brokers = ctx.get_brokers(network=network)

    self['rest_host'] = [m.networks[network] for m in managers]
    self['broker_ip'] = [b.networks[network] for b in brokers]

    # Concatenate the CA certs of all nodes that expose one.
    manager_certs = [m.ca_cert_content.strip()
                     for m in managers if m.ca_cert_content]
    broker_certs = [b.ca_cert_content.strip()
                    for b in brokers if b.ca_cert_content]
    self['rest_ssl_cert'] = '\n'.join(manager_certs)
    self['broker_ssl_cert'] = '\n'.join(broker_certs)

    # setting fileserver url:
    # using the mgmtworker-local one, not all in the cluster.
    # This is because mgmtworker will write a script
    # that is supposed to be downloaded by the agent installer, and that
    # script will only be served by the local restservice, because other
    # restservices would only have it available after the delay of
    # filesystem replication
    local_hostname = cloudify_utils.get_manager_name()
    local_ip = next(
        (m.networks[network] for m in managers
         if m.hostname == local_hostname),
        None)
    if not local_ip:
        raise RuntimeError(
            'No fileserver url for manager {0} on network {1}'
            .format(local_hostname, self['network']))
    self['file_server_url'] = agent_utils.get_manager_file_server_url(
        local_ip,
        cloudify_utils.get_manager_rest_service_port(),
        scheme=cloudify_utils.get_manager_file_server_scheme()
    )
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils

# Module-level REST client against the local manager (default protocol).
client = CloudifyClient(utils.get_manager_ip(), utils.get_manager_rest_service_port())


def get_host(entity):
    """Return the target of the entity's first 'contained_in' relationship,
    or None when the entity has no host."""
    if entity.instance.relationships:
        for relationship in entity.instance.relationships:
            if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
                return relationship.target
    return None


def has_attribute_mapping(entity, attribute_name):
    """Return True when the entity's node properties declare an a4c
    attribute mapping ('_a4c_att_<name>') for *attribute_name*."""
    ctx.logger.info('Check if it exists mapping for attribute {0} in {1}'.format(attribute_name, entity.node.properties))
    return ('_a4c_att_' + attribute_name) in entity.node.properties


def process_attribute_mapping(entity, attribute_name, data_retriever_function):
    # NOTE(review): this definition continues beyond the visible chunk;
    # its body is not shown here.
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
import platform
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils

# Module-level REST client against the local manager (default protocol).
client = CloudifyClient(utils.get_manager_ip(), utils.get_manager_rest_service_port())


def convert_env_value_to_string(envDict):
    """Coerce every key and value of *envDict* to str, in place.

    Iterates over a snapshot of the items so the dict can be safely
    mutated while looping.  The original iterated the live view while
    popping, which only worked because Python 2's .items() returns a
    list, and raises RuntimeError on Python 3.
    """
    for key, value in list(envDict.items()):
        del envDict[key]
        envDict[str(key)] = str(value)


def get_host(entity):
    """Return the target of the entity's first 'contained_in'
    relationship, or None when the entity has no host."""
    if entity.instance.relationships:
        for relationship in entity.instance.relationships:
            if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
                return relationship.target
    return None
def _cfy_agent_attributes_no_defaults(cloudify_agent): if not cloudify_agent.get('process_management'): cloudify_agent['process_management'] = {} if not cloudify_agent['process_management'].get('name'): # user did not specify process management configuration, choose the # default one according to os type. if cloudify_agent['windows']: name = 'nssm' else: name = 'init.d' cloudify_agent['process_management']['name'] = name if not cloudify_agent.get('name'): if cloudify_agent['local']: workflows_worker = cloudify_agent.get('workflows_worker', False) suffix = '_workflows' if workflows_worker else '' name = '{0}{1}'.format(ctx.deployment.id, suffix) else: name = ctx.instance.id cloudify_agent['name'] = name if not cloudify_agent.get('queue'): # by default, queue of the agent is the same as the name cloudify_agent['queue'] = cloudify_agent['name'] if not cloudify_agent.get('file_server_host'): cloudify_agent['file_server_host'] = \ cloudify_utils.get_manager_file_server_host() if not cloudify_agent.get('file_server_port'): cloudify_agent['file_server_port'] = \ cloudify_utils.get_manager_file_server_port() if not cloudify_agent.get('file_server_protocol'): cloudify_agent['file_server_protocol'] = \ cloudify_utils.get_manager_file_server_protocol() if not cloudify_agent.get('rest_host'): cloudify_agent['rest_host'] = \ cloudify_utils.get_manager_rest_service_host() if not cloudify_agent.get('security_enabled'): cloudify_agent['security_enabled'] = \ cloudify_utils.is_security_enabled() if not cloudify_agent.get('rest_protocol'): cloudify_agent['rest_protocol'] = \ cloudify_utils.get_manager_rest_service_protocol() if not cloudify_agent.get('rest_port'): cloudify_agent['rest_port'] = \ cloudify_utils.get_manager_rest_service_port() if not cloudify_agent.get('rest_username'): cloudify_agent['rest_username'] = \ cloudify_utils.get_rest_username() if not cloudify_agent.get('rest_password'): cloudify_agent['rest_password'] = \ cloudify_utils.get_rest_password() if not 
cloudify_agent.get('rest_token'): cloudify_agent['rest_token'] = \ cloudify_utils.get_rest_token() if not cloudify_agent.get('rest_cert_content'): cloudify_agent['rest_cert_content'] = \ cloudify_utils.get_rest_cert_content() if not cloudify_agent.get('verify_rest_certificate'): cloudify_agent['verify_rest_certificate'] = \ cloudify_utils.is_verify_rest_certificate() if not cloudify_agent.get('bypass_maintenance'): cloudify_agent['bypass_maintenance_mode'] = \ cloudify_utils.get_is_bypass_maintenance()
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
import platform
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils

# Use HTTPS (trusting any certificate) when the MANAGER_REST_PROTOCOL
# environment variable says so; otherwise fall back to plain HTTP.
if 'MANAGER_REST_PROTOCOL' in os.environ and os.environ['MANAGER_REST_PROTOCOL'] == "https":
    client = CloudifyClient(host=utils.get_manager_ip(),
                            port=utils.get_manager_rest_service_port(),
                            protocol='https',
                            trust_all=True)
else:
    client = CloudifyClient(host=utils.get_manager_ip(),
                            port=utils.get_manager_rest_service_port())


def convert_env_value_to_string(envDict):
    """Coerce every key and value of *envDict* to str, in place.

    Iterates over a snapshot of the items so the dict can be safely
    mutated while looping.  The original iterated the live view while
    popping, which only worked because Python 2's .items() returns a
    list, and raises RuntimeError on Python 3.
    """
    for key, value in list(envDict.items()):
        del envDict[key]
        envDict[str(key)] = str(value)


def get_host(entity):
    """Return the target of the entity's first 'contained_in'
    relationship, or None when the entity has no host."""
    if entity.instance.relationships:
        for relationship in entity.instance.relationships:
            if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
                return relationship.target
    return None
def get_attribute_from_top_host(entity, attribute_name):
    """Walk up the 'contained_in' chain to the top-most host entity and
    return *attribute_name* resolved on it (via get_attribute)."""
    host = get_host(entity)
    while host is not None:
        entity = host
        host = get_host(entity)
    return get_attribute(entity, attribute_name)


from cloudify import utils
from cloudify_rest_client import CloudifyClient
from cloudify.state import ctx_parameters as inputs
import os

# Module-level REST client authenticated against the local manager (HTTPS).
client = CloudifyClient(host=utils.get_manager_ip(),
                        port=utils.get_manager_rest_service_port(),
                        protocol='https',
                        cert=utils.get_local_rest_certificate(),
                        token= utils.get_rest_token(),
                        tenant= utils.get_tenant_name())


def convert_env_value_to_string(envDict):
    """Coerce every key and value of envDict to str, in place.

    NOTE(review): iterates the live dict while popping — this relies on
    Python 2's .items() returning a list and would raise RuntimeError on
    Python 3; confirm the target interpreter before changing.
    """
    for key, value in envDict.items():
        envDict[str(key)] = str(envDict.pop(key))


def parse_output(output):
    # by convention, the last output is the result of the operation
    last_output = None
    outputs = {}
    # NOTE(review): `re` is not imported in this chunk — presumably
    # imported earlier in the full file.  The rest of this function's
    # body continues beyond the visible chunk.
    pattern = re.compile('EXPECTED_OUTPUT_(\w+)=(.*)')