def _restore_certificate(self):
    """Queue a post-restore shell command that swaps in the snapshot's
    internal certificates over the manager's current ones.
    """
    snapshot_certs = os.path.join(self._tempdir, ARCHIVE_CERT_DIR)
    manager_cert_dir = os.path.dirname(get_local_rest_certificate())
    staging_dir = '{0}_from_snapshot_{1}'.format(
        manager_cert_dir, self._snapshot_id)

    # Stage the snapshot certificates next to the live ones
    utils.copy_snapshot_path(snapshot_certs, staging_dir)

    cert_names = (
        INTERNAL_CA_CERT_FILENAME,
        INTERNAL_CA_KEY_FILENAME,
        INTERNAL_CERT_FILENAME,
        INTERNAL_KEY_FILENAME,
        INTERNAL_P12_FILENAME,
    )

    # One mv per certificate, overwriting the manager's current copy
    pieces = [
        'mv -f {source_dir}/{cert} {dest_dir}/{cert}; '.format(
            dest_dir=manager_cert_dir,
            source_dir=staging_dir,
            cert=name,
        )
        for name in cert_names
    ]
    if not self._no_reboot:
        pieces.append('sudo shutdown -r now')
    self._post_restore_commands.append(''.join(pieces))
def _get_amqp_client(agent):
    """Yield an AMQP client configured for *agent*'s broker.

    When the agent carries its own ``broker_config``, the SSL cert is
    materialised at a temporary path and removed once the client is done.
    """
    broker_config = agent.get('broker_config')
    if broker_config:
        # Cert written to a temp path for this agent; clean it up after
        ssl_cert_path = _get_ssl_cert_path(broker_config)
        is_temp_cert = True
    else:
        broker_config = _get_broker_config(agent)
        ssl_cert_path = get_local_rest_certificate()
        is_temp_cert = False

    tenant = get_tenant()
    try:
        yield amqp_client.get_client(
            amqp_host=broker_config.get('broker_ip'),
            amqp_user=tenant.get('rabbitmq_username'),
            amqp_port=broker_config.get('broker_port'),
            amqp_pass=tenant.get('rabbitmq_password'),
            amqp_vhost=tenant.get('rabbitmq_vhost'),
            ssl_enabled=broker_config.get('broker_ssl_enabled'),
            ssl_cert_path=ssl_cert_path
        )
    finally:
        if is_temp_cert and ssl_cert_path:
            os.remove(ssl_cert_path)
def _restore_certificate(self):
    """Swap the manager's internal cert/key/p12 for the snapshot's copies.

    The swap runs as a detached shell command so it happens only after
    the restore workflow has finished using the current certificates.
    """
    archive_cert_dir = os.path.join(self._tempdir, ARCHIVE_CERT_DIR)
    old_cert_dir = os.path.dirname(get_local_rest_certificate())
    new_cert_dir = '{0}_from_snapshot_{1}'.format(
        old_cert_dir, self._snapshot_id)

    def _paths(filename):
        # (current live path, staged-from-snapshot path) for one file
        return (os.path.join(old_cert_dir, filename),
                os.path.join(new_cert_dir, filename))

    old_cert, new_cert = _paths(INTERNAL_CERT_FILENAME)
    old_key, new_key = _paths(INTERNAL_KEY_FILENAME)
    old_p12, new_p12 = _paths(INTERNAL_P12_FILENAME)

    # Give the restore workflow a moment to finish before the swap
    delay_seconds = 3
    steps = [
        'sleep {0}'.format(delay_seconds),
        'rm -rf {0} {1} {2}'.format(old_cert, old_key, old_p12),
        'mv {0} {1}'.format(new_cert, old_cert),
        'mv {0} {1}'.format(new_key, old_key),
        'mv {0} {1}'.format(new_p12, old_p12),
    ]

    utils.copy_snapshot_path(archive_cert_dir, new_cert_dir)

    if not self._no_reboot:
        steps.append('sudo shutdown -r now')
    # Fire-and-forget on purpose: the swap must outlive this process
    subprocess.Popen('; '.join(steps), shell=True)
def _get_creds_json_content():
    """Return a JSON document holding the local REST certificate content
    and the current REST token from the operation context.
    """
    cert_path = get_local_rest_certificate()
    with open(cert_path, 'r') as cert_file:
        cert_content = cert_file.read()
    creds = {
        'ssl_cert_content': cert_content,
        'rest_token': ctx.rest_token,
    }
    return json.dumps(creds)
def _restore_certificate(self):
    """Replace the manager's cert directory with the snapshot's copy.

    Runs as a detached shell command so the current certificates stay
    usable until the restore workflow has finished.
    """
    archive_cert_dir = os.path.join(self._tempdir, ARCHIVE_CERT_DIR)
    live_dir = os.path.dirname(get_local_rest_certificate())
    staged_dir = live_dir + '_from_snapshot'

    # Stage the snapshot certificates alongside the live directory
    utils.copy_snapshot_path(archive_cert_dir, staged_dir)

    # Delay so the restore workflow can finish before its certs vanish
    workflow_grace_seconds = 3
    steps = [
        'sleep {0}'.format(workflow_grace_seconds),
        'rm -rf {0}'.format(live_dir),
        'mv {0} {1}'.format(staged_dir, live_dir),
    ]
    if not self._no_reboot:
        steps.append('sudo shutdown -r now')
    # Detached shell so the swap survives this process exiting
    subprocess.Popen('; '.join(steps), shell=True)
def copy_files_between_manager_and_snapshot(archive_root,
                                            config,
                                            to_archive=True,
                                            new_tenant=''):
    """
    Copy files/dirs between snapshot/manager and manager/snapshot.

    :param archive_root: Path to the snapshot archive root.
    :param config: Config of manager.
    :param to_archive: If True then copying is from manager to snapshot,
        otherwise from snapshot to manager.
    :param new_tenant: a tenant to which the snapshot is restored.
        Relevant only in the case of restoring a snapshot from a manager
        of a version older than 4.0.0
    """
    ctx.logger.info('Copying files/directories...')

    # Files/dirs with constant relative/absolute paths,
    # where first path is path in manager, second is path in snapshot.
    # If paths are relative then should be relative to file server (path
    # in manager) and snapshot archive (path in snapshot). If paths are
    # absolute then should point to proper data in manager/snapshot archive
    data_to_copy = [
        (os.path.join(
            constants.FILE_SERVER_BLUEPRINTS_FOLDER, new_tenant),
         constants.FILE_SERVER_BLUEPRINTS_FOLDER),
        (os.path.join(
            constants.FILE_SERVER_DEPLOYMENTS_FOLDER, new_tenant),
         constants.FILE_SERVER_DEPLOYMENTS_FOLDER),
        (os.path.join(
            constants.FILE_SERVER_UPLOADED_BLUEPRINTS_FOLDER, new_tenant),
         constants.FILE_SERVER_UPLOADED_BLUEPRINTS_FOLDER),
        (constants.FILE_SERVER_PLUGINS_FOLDER,
         constants.FILE_SERVER_PLUGINS_FOLDER),
    ]

    local_cert_dir = os.path.dirname(get_local_rest_certificate())
    if to_archive:
        # Certificates are only captured into the archive, never restored
        # from it by this function
        data_to_copy.append((local_cert_dir, ARCHIVE_CERT_DIR))

    for (p1, p2) in data_to_copy:
        # First expand relative paths. os.path.isabs is used instead of
        # indexing p1[0], which would raise IndexError on an empty path.
        if not os.path.isabs(p1):
            p1 = os.path.join(config.file_server_root, p1)
        if not os.path.isabs(p2):
            p2 = os.path.join(archive_root, p2)
        # make p1 always point to the source and p2 to the copy target
        if not to_archive:
            p1, p2 = p2, p1
        copy_snapshot_path(p1, p2)
def get_resource_from_manager(resource_path, base_url=None, base_urls=None):
    """Get resource from the manager file server.

    :param resource_path: path to resource on the file server
    :param base_url: The base URL to manager file server. Deprecated.
    :param base_urls: A list of base URLs to cluster manager file servers.
    :returns: resource content
    :raises HttpException: when the last reachable manager returns non-OK
    :raises NonRecoverableError: when no manager could be contacted
    """
    # Copy the caller's list: the += and insert below must not mutate it
    base_urls = list(base_urls or [])
    base_urls += utils.get_manager_file_server_url()
    if base_url is not None:
        base_urls.insert(0, base_url)

    # if we have multiple managers to try, set connect_timeout so that
    # we're not waiting forever for a single non-responding manager
    if len(base_urls) > 1:
        timeout = (10, None)
    else:
        timeout = None

    verify = utils.get_local_rest_certificate()
    headers = {}
    try:
        headers[constants.CLOUDIFY_EXECUTION_TOKEN_HEADER] = \
            ctx.execution_token
    except NotInContext:
        # Not in an operation context; fall back to the workflow context
        headers[constants.CLOUDIFY_EXECUTION_TOKEN_HEADER] = \
            workflow_ctx.execution_token

    for ix, next_url in enumerate(base_urls):
        url = '{0}/{1}'.format(next_url.rstrip('/'),
                               resource_path.lstrip('/'))
        try:
            response = requests.get(
                url, verify=verify, headers=headers, timeout=timeout)
        except requests.ConnectionError:
            continue
        if not response.ok:
            is_last = (ix == len(base_urls) - 1)
            if not is_last:
                # if there's more managers to try, try them: due to
                # filesystem replication lag, they might have files that
                # the previous manager didn't
                continue
            raise HttpException(url, response.status_code, response.reason)
        return response.content
    raise NonRecoverableError(
        'Failed to download {0}: unable to connect to any manager (tried: {1})'
        .format(resource_path, ', '.join(base_urls)))
def check_liveness(nodes_to_monitor, depl_id):
    """Compare influx monitoring data against the Cloudify desired state
    and trigger the a4c_heal workflow for instances with no recent metrics.
    """
    c = CloudifyClient(host=utils.get_manager_ip(),
                       port=utils.get_manager_rest_service_port(),
                       protocol='https',
                       cert=utils.get_local_rest_certificate(),
                       token=utils.get_rest_token(),
                       tenant=utils.get_tenant_name())
    c_influx = InfluxDBClient(host='localhost', port=8086,
                              database='cloudify')
    log('nodes_to_monitor: {0}'.format(nodes_to_monitor))

    # compare influx data (monitoring) to cloudify desired state
    for node_name in nodes_to_monitor:
        for instance in c.node_instances.list(depl_id, node_name):
            q_string = ('SELECT MEAN(value) FROM /{0}\.{1}\.{2}'
                        '\.cpu_total_system/ GROUP BY time(10s) '
                        'WHERE time > now() - 40s').format(
                            depl_id, node_name, instance.id)
            log('query string is {0}'.format(q_string))
            try:
                result = c_influx.query(q_string)
                log('result is {0}'.format(result))
                if result:
                    continue
                # No recent CPU samples: the instance may be dead. Heal
                # only if nothing is currently running on the deployment.
                executions = c.executions.list(depl_id)
                has_pending_execution = any(
                    execution.status not in execution.END_STATES
                    for execution in (executions or []))
                if has_pending_execution:
                    log('pending executions on the deployment...waiting for the end before calling heal workflow...')
                else:
                    log('Setting state to error for instance {0} and its children'.format(instance.id))
                    update_nodes_tree_state(c, depl_id, instance, 'error')
                    params = {'node_instance_id': instance.id}
                    log('Calling Auto-healing workflow for container instance {0}'.format(instance.id))
                    c.executions.start(depl_id, 'a4c_heal', params)
            except InfluxDBClientError as ee:
                log('DBClienterror {0}'.format(str(ee)), level='ERROR')
                log('instance id is {0}'.format(instance), level='ERROR')
            except Exception as e:
                # Best-effort monitoring loop: log and move on
                log(str(e), level='ERROR')
def _restore_certificate(self):
    """Queue post-restore commands that install the snapshot's certs,
    synthesising a CA pair from the internal cert/key when the snapshot
    predates CA certificates.
    """
    archive_cert_dir = os.path.join(self._tempdir, ARCHIVE_CERT_DIR)
    manager_cert_dir = os.path.dirname(get_local_rest_certificate())
    staged_cert_dir = '{0}_from_snapshot_{1}'.format(
        manager_cert_dir, self._snapshot_id)

    # Stage the snapshot certificates next to the live directory
    utils.copy_snapshot_path(archive_cert_dir, staged_cert_dir)

    # Overwrite each live cert with its staged counterpart
    for cert in (INTERNAL_CA_CERT_FILENAME,
                 INTERNAL_CA_KEY_FILENAME,
                 INTERNAL_CERT_FILENAME,
                 INTERNAL_KEY_FILENAME,
                 INTERNAL_P12_FILENAME):
        self._post_restore_commands.append(
            'mv -f {source_dir}/{cert} {dest_dir}/{cert}'.format(
                dest_dir=manager_cert_dir,
                source_dir=staged_cert_dir,
                cert=cert,
            )
        )

    ca_in_archive = os.path.exists(
        os.path.join(archive_cert_dir, INTERNAL_CA_CERT_FILENAME))
    if not ca_in_archive:
        # Older snapshots lack a CA pair; reuse the internal cert/key
        fallback_pairs = [
            (INTERNAL_CERT_FILENAME, INTERNAL_CA_CERT_FILENAME),
            (INTERNAL_KEY_FILENAME, INTERNAL_CA_KEY_FILENAME),
        ]
        for source, target in fallback_pairs:
            self._post_restore_commands.append(
                'cp {source} {target}'.format(
                    source=os.path.join(CERT_DIR, source),
                    target=os.path.join(CERT_DIR, target),
                )
            )

    if not self._no_reboot:
        self._post_restore_commands.append('sudo shutdown -r now')
def _restore_certificate(self):
    """Queue a single post-restore shell command that installs the
    snapshot's certs, synthesising a CA pair from the internal cert/key
    when the snapshot predates CA certificates.
    """
    archive_cert_dir = os.path.join(self._tempdir, ARCHIVE_CERT_DIR)
    live_cert_dir = os.path.dirname(get_local_rest_certificate())
    staged_cert_dir = '{0}_from_snapshot_{1}'.format(
        live_cert_dir, self._snapshot_id)

    # Stage the snapshot certificates next to the live directory
    utils.copy_snapshot_path(archive_cert_dir, staged_cert_dir)

    cert_names = (
        INTERNAL_CA_CERT_FILENAME,
        INTERNAL_CA_KEY_FILENAME,
        INTERNAL_CERT_FILENAME,
        INTERNAL_KEY_FILENAME,
        INTERNAL_P12_FILENAME,
    )
    # Overwrite each live cert with its staged counterpart
    pieces = [
        'mv -f {source_dir}/{cert} {dest_dir}/{cert}; '.format(
            dest_dir=live_cert_dir,
            source_dir=staged_cert_dir,
            cert=name,
        )
        for name in cert_names
    ]

    archive_has_ca = os.path.exists(
        os.path.join(archive_cert_dir, INTERNAL_CA_CERT_FILENAME))
    if not archive_has_ca:
        # Older snapshots lack a CA pair; reuse the internal cert/key
        for source, target in ((INTERNAL_CERT_FILENAME,
                                INTERNAL_CA_CERT_FILENAME),
                               (INTERNAL_KEY_FILENAME,
                                INTERNAL_CA_KEY_FILENAME)):
            pieces.append('cp {source} {target};'.format(
                source=os.path.join(CERT_DIR, source),
                target=os.path.join(CERT_DIR, target)))

    if not self._no_reboot:
        pieces.append('sudo shutdown -r now')
    self._post_restore_commands.append(''.join(pieces))
def get_rest_client(tenant=None, api_token=None):
    """
    :param tenant: optional tenant name to connect as
    :param api_token: optional api_token to authenticate with (instead
        of using REST token)
    :returns: A REST client configured to connect to the manager in context
    :rtype: cloudify_rest_client.CloudifyClient
    """
    # Cluster deployments get the cluster-aware client class
    client_class = (CloudifyClusterClient if get_cluster_settings()
                    else CloudifyClient)

    tenant = tenant or utils.get_tenant_name()

    headers = {}
    # Handle maintenance mode
    if utils.get_is_bypass_maintenance():
        headers['X-BYPASS-MAINTENANCE'] = 'True'

    # If api_token or execution_token was provided, no REST token is used
    token = None
    execution_token = utils.get_execution_token()
    if execution_token:
        headers[constants.CLOUDIFY_EXECUTION_TOKEN_HEADER] = execution_token
    elif api_token:
        headers[constants.CLOUDIFY_API_AUTH_TOKEN_HEADER] = api_token
    else:
        token = utils.get_rest_token()

    return client_class(
        headers=headers,
        host=utils.get_manager_rest_service_host(),
        port=utils.get_manager_rest_service_port(),
        tenant=tenant,
        token=token,
        protocol=constants.SECURED_PROTOCOL,
        cert=utils.get_local_rest_certificate(),
        kerberos_env=utils.get_kerberos_indication(
            os.environ.get(constants.KERBEROS_ENV_KEY)))
def _get_amqp_client(agent):
    """Yield an AMQP client for *agent*'s broker, removing any
    temporary SSL cert file afterwards.
    """
    broker_config = agent.get('broker_config')
    if broker_config:
        # Cert is written to a temp path, so remove it afterwards
        ssl_cert_path = _get_ssl_cert_path(broker_config)
        cleanup_cert = True
    else:
        broker_config = _get_broker_config(agent)
        ssl_cert_path = get_local_rest_certificate()
        cleanup_cert = False
    try:
        yield amqp_client.get_client(
            amqp_host=broker_config.get('broker_ip'),
            amqp_user=ctx.tenant.get('rabbitmq_username'),
            amqp_port=broker_config.get('broker_port'),
            amqp_pass=ctx.tenant.get('rabbitmq_password'),
            amqp_vhost=ctx.tenant.get('rabbitmq_vhost'),
            ssl_enabled=broker_config.get('broker_ssl_enabled'),
            ssl_cert_path=ssl_cert_path)
    finally:
        if cleanup_cert and ssl_cert_path:
            os.remove(ssl_cert_path)
def get_resource_from_manager(resource_path, base_url=None):
    """
    Get resource from the manager file server.

    :param resource_path: path to resource on the file server
    :param base_url: optional base URL of the file server; defaults to
        the manager file server URL
    :returns: resource content
    :raises HttpException: if the server responds with a non-OK status
    """
    if base_url is None:
        base_url = utils.get_manager_file_server_url()

    # Normalize slashes when joining, matching the cluster-aware variant
    # of this function, so callers may pass either path/URL form
    url = '{0}/{1}'.format(base_url.rstrip('/'), resource_path.lstrip('/'))

    # Verify TLS against the local copy of the manager's REST certificate
    verify = utils.get_local_rest_certificate()
    headers = {}
    try:
        headers[constants.CLOUDIFY_TOKEN_AUTHENTICATION_HEADER] = \
            ctx.rest_token
    except NotInContext:
        # Not in an operation context; fall back to the workflow context
        headers[constants.CLOUDIFY_TOKEN_AUTHENTICATION_HEADER] = \
            workflow_ctx.rest_token

    response = requests.get(url, verify=verify, headers=headers)
    if not response.ok:
        raise HttpException(url, response.status_code, response.reason)
    return response.content
def copy_files_between_manager_and_snapshot(archive_root,
                                            config,
                                            to_archive=True,
                                            tenant_name=None):
    """
    Copy files/dirs between snapshot/manager and manager/snapshot.

    :param archive_root: Path to the snapshot archive root.
    :param config: Config of manager.
    :param to_archive: If True then copying is from manager to snapshot,
        otherwise from snapshot to manager.
    :param tenant_name: If passed, will restore files to this tenant name.
        Expected to be used only for 3.x upgrades.
    """
    ctx.logger.info('Copying files/directories...')

    data_to_copy = [
        constants.FILE_SERVER_BLUEPRINTS_FOLDER,
        constants.FILE_SERVER_DEPLOYMENTS_FOLDER,
        constants.FILE_SERVER_UPLOADED_BLUEPRINTS_FOLDER,
        constants.FILE_SERVER_PLUGINS_FOLDER,
        constants.FILE_SERVER_GLOBAL_RESOURCES_FOLDER,
        constants.FILE_SERVER_TENANT_RESOURCES_FOLDER,
        constants.FILE_SERVER_AUTHENTICATORS_FOLDER
    ]

    if tenant_name:
        # This is a 3.x install, files go in tenant folders
        data_to_copy = [
            (
                # The root path to copy the files to in the manager for
                # each type of restored file, e.g. blueprints/<tenant_name>/
                # Plugins are an exception as they are all stored in one
                # path under UUIDs without tenant names
                os.path.join(path, tenant_name)
                if path != constants.FILE_SERVER_PLUGINS_FOLDER else path,
                # The path of the file type in the snapshot
                path,
            )
            for path in data_to_copy
        ]
    else:
        # This is a 4.x+ install, files keep their normal locations
        data_to_copy = [(path, path) for path in data_to_copy]

    local_cert_dir = os.path.dirname(get_local_rest_certificate())
    if to_archive:
        # Certificates and the security file are captured into the archive
        data_to_copy.append(
            (local_cert_dir, snapshot_constants.ARCHIVE_CERT_DIR))
        data_to_copy.append((SECURITY_FILE_LOCATION, SECURITY_FILENAME))

    ctx.logger.info(str(data_to_copy))
    for p1, p2 in data_to_copy:
        # First expand relative paths. os.path.isabs is used instead of
        # indexing p1[0], which would raise IndexError on an empty path.
        if not os.path.isabs(p1):
            p1 = os.path.join(config.file_server_root, p1)
        if not os.path.isabs(p2):
            p2 = os.path.join(archive_root, p2)
        # make p1 always point to the source and p2 to the copy target
        if not to_archive:
            p1, p2 = p2, p1
        copy_snapshot_path(p1, p2)
def _get_ssl_cert_content(old_agent_version):
    """Return the local REST certificate text for agents >= 4.2.

    Agents older than 4.2 did not use the certificate, so None is
    returned for them.
    """
    agent_version = ManagerVersion(old_agent_version)
    if agent_version < ManagerVersion('4.2'):
        return None
    cert_path = get_local_rest_certificate()
    with open(cert_path, 'r') as cert_file:
        return cert_file.read()
host = get_host(entity) while host is not None: entity = host host = get_host(entity) return get_attribute(entity, attribute_name) from cloudify import utils from cloudify_rest_client import CloudifyClient from cloudify.state import ctx_parameters as inputs import os client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port(), protocol='https', cert=utils.get_local_rest_certificate(), token= utils.get_rest_token(), tenant= utils.get_tenant_name()) def convert_env_value_to_string(envDict): for key, value in envDict.items(): envDict[str(key)] = str(envDict.pop(key)) def parse_output(output): # by convention, the last output is the result of the operation last_output = None outputs = {} pattern = re.compile('EXPECTED_OUTPUT_(\w+)=(.*)') for line in output.splitlines(): match = pattern.match(line)