def create_keypair(keypair):
    """Create a keypair, or load an externally managed one.

    When the blueprint declares an external resource, both key halves are
    read from blueprint resources instead of being generated, and the
    public key is recorded under the instance's resource-id runtime
    property.  Otherwise the keypair object creates itself.
    """
    if not utils.should_use_external_resource():
        keypair.create()
        return
    # External resource: pull both halves of the key from blueprint files.
    keypair.private_key = ctx.get_resource(keypair.private_key_path)
    public_path = os.path.expanduser(keypair.public_key_path)
    keypair.public_key = ctx.get_resource(public_path)
    ctx.instance.runtime_properties[constants.RESOURCE_ID] = keypair.public_key
def configure(subject=None):
    """Render the HAProxy config template and install it atomically.

    ``subject`` defaults to the operation ctx.  Its node properties plus
    the discovered backends feed the Jinja2 template; the rendered file is
    syntax-checked by haproxy before it replaces the live config.
    """
    subject = subject or ctx
    ctx.logger.info('Configuring HAProxy.')
    template = Template(ctx.get_resource(TEMPLATE_RESOURCE_NAME))
    ctx.logger.debug('Building a dict object that will contain variables '
                     'to write to the Jinja2 template.')
    template_vars = subject.node.properties.copy()
    template_vars['frontend_id'] = subject.node.name
    template_vars['backends'] = subject.instance.runtime_properties.get(
        'backends', {})
    ctx.logger.debug(
        'Rendering the Jinja2 template to {0}.'.format(CONFIG_PATH))
    ctx.logger.debug('The config dict: {0}.'.format(template_vars))
    with tempfile.NamedTemporaryFile(delete=False) as staged:
        staged.write(template.render(template_vars))
    # Validate the candidate config before overwriting the live one.
    _run('sudo /usr/sbin/haproxy -f {0} -c'.format(staged.name),
         error_message='Failed to Configure')
    _run('sudo mv {0} {1}'.format(staged.name, CONFIG_PATH),
         error_message='Failed to write to {0}.'.format(CONFIG_PATH))
def _get_task(tasks_file, task_name):
    """Fetch ``tasks_file`` via the ctx, exec it, and return ``task_name``.

    Raises NonRecoverableError when the file cannot be fetched or executed,
    or when the requested name is missing or not callable.
    """
    ctx.logger.debug('Getting tasks file...')
    try:
        source = ctx.get_resource(tasks_file)
    except Exception as err:
        raise exceptions.NonRecoverableError(
            "Could not get '{0}' ({1}: {2})".format(
                tasks_file, type(err).__name__, err))
    namespace = exec_env.exec_globals(tasks_file)
    try:
        exec_(source, _globs_=namespace)
    except Exception as err:
        raise exceptions.NonRecoverableError(
            "Could not load '{0}' ({1}: {2})".format(
                tasks_file, type(err).__name__, err))
    candidate = namespace.get(task_name)
    if not candidate:
        raise exceptions.NonRecoverableError(
            "Could not find task '{0}' in '{1}'"
            .format(task_name, tasks_file))
    if not callable(candidate):
        raise exceptions.NonRecoverableError(
            "'{0}' in '{1}' is not callable"
            .format(task_name, tasks_file))
    return candidate
def prepare():
    """Log the Fabric connection settings and the blueprint config resource.

    Security fix: the plaintext password is no longer written to the log —
    only whether one is set is reported.
    """
    conf_file = ctx.get_resource(CONFIG_FILE_NAME)
    ctx.logger.info('prop1: {0}'.format(env.host_string))
    ctx.logger.info('prop2: {0}'.format(env.user))
    # Never log credentials in clear text.
    ctx.logger.info('prop3: {0}'.format('***' if env.password else None))
    ctx.logger.info('Config: {0}'.format(conf_file))
def prepare():
    """Log the operation inputs and the blueprint config resource.

    Security fix: the plaintext password is no longer written to the log —
    only whether one is set is reported.
    """
    conf_file = ctx.get_resource(CONFIG_FILE_NAME)
    ctx.logger.info('prop2: {0}'.format(inputs['host_string']))
    ctx.logger.info('prop3: {0}'.format(inputs['user']))
    # Never log credentials in clear text.
    ctx.logger.info('prop4: {0}'.format('***' if inputs['password'] else None))
    ctx.logger.info('Folder: {0}'.format(conf_file))
def create(fco_api, *args, **kwargs):
    """Create (or adopt) an SSH key on the FCO platform.

    If the node reuses an existing key, only its UUID is recorded.
    Otherwise the private key is loaded from the blueprint or, when absent,
    generated on disk, and the derived public key is registered via the API.

    Returns the resource UUID of the SSH key.
    """
    ctx.logger.info('Starting SSH key creation')
    # Ease of access
    _rp = ctx.instance.runtime_properties
    _np = ctx.node.properties
    # Check if existing key is to be used
    if _np[PROP_USE_EXISTING]:
        key = get_resource(fco_api, _np[PROP_RESOURCE_ID], RT.SSHKEY)
        _rp[RPROP_UUID] = key.resourceUUID
        return _rp[RPROP_UUID]
    # Get configuration: try the blueprint first, fall back to a local path.
    try:
        private_key = ctx.get_resource(_np[PROP_PRIVATE_KEY])
        private_key_exists = True
    except NonRecoverableError as e:
        if 'HttpException: 404' in str(e) or 'IOError: [Errno 2]' in str(e):
            private_key = os.path.expanduser(_np[PROP_PRIVATE_KEY])
            private_key_exists = False
        elif 'HttpException: 403' in str(e):
            raise NonRecoverableError('Bad permissions on key, cannot access.')
        else:
            raise
    except IOError as e:
        if e.errno == errno.ENOENT:
            private_key = os.path.expanduser(_np[PROP_PRIVATE_KEY])
            private_key_exists = False
        else:
            raise
    user = _np[PROP_USER] or None
    global_ = _np[PROP_GLOBAL]
    # Get public key, generate private key if necessary
    if not private_key_exists:
        key = RSA.generate(2048)
        with open(private_key, 'w') as f:
            # Fix: 0o600 instead of 0600 — the old-style octal literal is a
            # SyntaxError on Python 3 (0o works on 2.6+ as well).  Restrict
            # the key file before writing secret material into it.
            os.chmod(private_key, 0o600)
            f.write(key.exportKey())
    else:
        key = RSA.importKey(private_key)
    public_key = key.publickey().exportKey(format='OpenSSH')
    key_name = '{}{}_{}'.format(ctx.bootstrap_context.resources_prefix,
                                ctx.deployment.id, ctx.instance.id)
    key_uuid = create_ssh_key(fco_api, public_key, user, global_, key_name)
    _rp[RPROP_UUID] = key_uuid
    ctx.logger.info('SSH Key created: %s', key_uuid)
    return key_uuid
def _process_source(source): split = source.split('://') schema = split[0] the_rest = ''.join(split[1:]) if schema in ['http', 'https']: return requests.get(source).text elif schema == 'file' and the_rest: with open(the_rest) as f: return f.read() else: return ctx.get_resource(source)
def download_resource_and_render(source, destination, params):
    """Render blueprint resource *source* with *params* and move it to
    *destination* via sudo."""
    ctx.logger.info('Downloading resource {0} to {1}'.format(source, destination))
    jinja_template = Template(ctx.get_resource(source))
    ctx.logger.debug('The config dict for the Jinja2 template: {0}.'.format(params))
    ctx.logger.debug('Rendering the Jinja2 template to {0}.'.format(destination))
    rendered = jinja_template.render(params)
    # Stage the rendered text in a temp file, then move it into place.
    with tempfile.NamedTemporaryFile(delete=False) as staging:
        staging.write(rendered)
    sudo(['mv', staging.name, destination])
    ctx.logger.debug('Great success')
def deploy_job(script, inputs, credentials, wm_type, workdir, name,
               logger, skip_cleanup):  # pylint: disable=W0613
    """ Exec a deployment job script that receives SSH credentials as input """
    wm = WorkloadManager.factory(wm_type)
    if not wm:
        raise NonRecoverableError(
            "Workload Manager '" + wm_type + "' not supported.")
    # Execute the script and manage the output
    success = False
    client = SshClient(credentials)
    if wm._create_shell_script(client,
                               name,
                               ctx.get_resource(script),
                               logger,
                               workdir=workdir):
        call = "./" + name
        for dinput in inputs:
            str_input = str(dinput)
            # Quote arguments containing whitespace, unless already quoted.
            if ('\n' in str_input or ' ' in str_input) and \
                    str_input[0] != '"':
                call += ' "' + str_input + '"'
            else:
                call += ' ' + str_input
        _, exit_code = client.execute_shell_command(
            call, workdir=workdir, wait_result=True)
        # Fix: compare ints with '!=', not 'is not' — identity tests on
        # integers rely on CPython's small-int caching and are not a
        # correctness guarantee.
        if exit_code != 0:
            logger.warning("failed to deploy job: call '" + call +
                           "', exit code " + str(exit_code))
        else:
            success = True
        if not skip_cleanup:
            if not client.execute_shell_command(
                    "rm " + name, workdir=workdir):
                logger.warning("failed removing bootstrap script")
    client.close_connection()
    return success
def stopwf(**kwargs):
    """Read the decommission workflow settings and announce the run.

    Loads ``orchestration.cfg`` from the blueprint, substitutes the vDC
    name into the configured payload, and emits Cloudify events describing
    the workflow about to run.
    """
    parser = ConfigParser.ConfigParser()
    # The config file ships inside the blueprint; feed it through a buffer.
    parser.readfp(StringIO.StringIO(ctx.get_resource('orchestration.cfg')))
    ctx.send_event(' -----node name-----> ' + ctx.node.name)
    vdc_name = ctx.node.properties[ctx.node.name]
    host = ctx.node.properties['host']
    definitionName = parser.get('DECOMMISSION', 'definitionName')
    payload = parser.get('DECOMMISSION', 'payload')
    payload = payload.replace("$VDC_NAME", vdc_name)
    ctx.send_event('running commissioning workflow -- host: ' + host +
                   ' definition: ' + definitionName +
                   ' vDC name: ' + vdc_name)
def execute(params, template_file, **kwargs):
    """Process *template_file* with *params* and store the results.

    The outcome of utility.process is merged into this instance's runtime
    properties; response-classified failures are re-raised as recoverable
    or non-recoverable Cloudify errors.
    """
    params = params or {}
    if not template_file:
        ctx.logger.info('Processing finished. No template file provided.')
        return
    ctx.logger.info('Execute:\n'
                    'params: {}\n'
                    'template_file: {}'.format(params, template_file))
    runtime_properties = ctx.instance.runtime_properties.copy()
    ctx.logger.info(
        'Runtime properties get_all: {}'.format(runtime_properties))
    for rel in ctx.instance.relationships:
        ctx.logger.info('ctx instance: {}'.format(rel.type))
        ctx.logger.info('ctx instance: {}'.format(
            rel.target.instance.runtime_properties))
        ctx.logger.info('ctx instance: {}'.format(
            rel.target.node.properties))
    # Replace host config with the runtime host ip (cheaper than
    # re-architecting the plugin).
    params['host'] = ctx.instance.host_ip
    runtime_properties.update(params)
    ctx.logger.debug('Runtime properties: {}'.format(runtime_properties))
    template = ctx.get_resource(template_file)
    request_props = ctx.node.properties.copy()
    ctx.logger.debug('request_props: {}'.format(request_props))
    try:
        ctx.instance.runtime_properties.update(
            utility.process(params, template, request_props))
    except exceptions.NonRecoverableResponseException as err:
        ctx.logger.debug('--sss--> Nonrecoverable: {}'.format(err))
        raise NonRecoverableError(err)
    except (exceptions.RecoverableResponseException,
            exceptions.RecoverableStatusCodeCodeException) as err:
        ctx.logger.debug('--sss--> Recoverable: {}'.format(err))
        raise RecoverableError(err)
    except Exception as err:
        ctx.logger.info('Exception traceback : {}'.format(
            traceback.format_exc()))
        raise NonRecoverableError(err)
def _execute(params, template_file, instance, node):
    """Process *template_file* and merge the result into *instance*'s
    runtime properties, translating plugin exceptions to Cloudify ones."""
    if not template_file:
        ctx.logger.info('Processing finished. No template file provided.')
        return
    template = ctx.get_resource(template_file)
    node_props = node.properties.copy()
    try:
        result = utility.process(params, template, node_props)
        instance.runtime_properties.update(result)
    except (exceptions.ExpectationException,
            exceptions.RecoverebleStatusCodeCodeException) as err:
        # Expected-response / retryable status failures are retried.
        raise RecoverableError(err)
    except Exception as err:
        ctx.logger.info('Exception traceback : {}'.format(
            traceback.format_exc()))
        raise NonRecoverableError(err)
def download_resource_and_render(source, destination, params):
    """Fetch blueprint resource *source*, render it with *params*, and
    install the result at *destination* (moved into place with sudo)."""
    ctx.logger.info('Downloading resource {0} to {1}'.format(
        source, destination))
    tpl = Template(ctx.get_resource(source))
    ctx.logger.debug(
        'The config dict for the Jinja2 template: {0}.'.format(params))
    ctx.logger.debug(
        'Rendering the Jinja2 template to {0}.'.format(destination))
    body = tpl.render(params)
    with tempfile.NamedTemporaryFile(delete=False) as scratch:
        scratch.write(body)
    sudo(['mv', scratch.name, destination])
    ctx.logger.debug('Great success')
def install_plugins():
    """Upload blueprint-bundled plugins to the manager host and install them.

    Plugins with http(s) sources are left for the install script to fetch
    remotely; locally-sourced plugins are tar-gzipped and pushed over
    fabric first, with their source rewritten to the uploaded path.
    Runs in a relationship operation (reads ``ctx.source``).
    """
    install_plugins_script = 'install_plugins.sh'
    ctx.logger.info('Installing plugins')
    # Getting all the required plugins
    plugins = ctx.source.node.properties.get('plugins', {})
    # Shame to do all the work for nothing
    if plugins:
        # create location to place tar-gzipped plugins in
        cloudify_plugins = 'cloudify/plugins'
        _run_command('mkdir -p ~/{0}'.format(cloudify_plugins))
        # for each plugin that is included in the blueprint, tar-gzip it
        # and place it in the plugins dir on the host
        for name, plugin in plugins.items():
            source = plugin['source']
            # Remote sources are downloaded by the script itself.
            if source.split('://')[0] in ['http', 'https']:
                continue

            # temporary workaround to resolve absolute file path
            # to installed plugin using internal local workflows storage
            # information
            # NOTE(review): ctx._endpoint is a private API — confirm it is
            # still available in the targeted Cloudify version.
            plugin_path = os.path.join(ctx._endpoint.storage.resources_root,
                                       source)
            with tempfile.TemporaryFile() as fileobj:
                with tarfile.open(fileobj=fileobj, mode='w:gz') as tar:
                    tar.add(plugin_path, arcname=name)
                # Rewind so fabric uploads the archive from the start.
                fileobj.seek(0)
                tar_remote_path = '{0}/{1}.tar.gz' \
                    .format(cloudify_plugins, name)
                fabric.api.put(fileobj, '~/{0}'.format(tar_remote_path))
                plugin['source'] = 'file://$HOME/{0}'.format(tar_remote_path)
        # render script template and copy it to host's home dir
        script_template = ctx.get_resource('components/restservice/'
                                           'scripts/install_plugins.sh')
        script = jinja2.Template(script_template).render(plugins=plugins)
        fabric.api.put(local_path=StringIO(script),
                       remote_path='~/cloudify/{0}'
                       .format(install_plugins_script))
        # Execute the rendered script
        _run_command('chmod +x ~/cloudify/{0} && ~/cloudify/{0}'
                     .format(install_plugins_script))
def install_plugins():
    """Upload blueprint-bundled plugins to the manager host and install them.

    Plugins with http(s) sources are left for the install script to fetch
    remotely; locally-sourced plugins are tar-gzipped and pushed over
    fabric first, with their source rewritten to the uploaded path.
    Runs in a node operation (reads ``ctx.node``).
    """
    install_plugins_script = 'install_plugins.sh'
    ctx.logger.info('Installing plugins')
    # Getting all the required plugins
    plugins = ctx.node.properties.get('plugins', {})
    # Shame to do all the work for nothing
    if plugins:
        # create location to place tar-gzipped plugins in
        cloudify_plugins = 'cloudify/plugins'
        _run_command('mkdir -p ~/{0}'.format(cloudify_plugins))
        # for each plugin that is included in the blueprint, tar-gzip it
        # and place it in the plugins dir on the host
        for name, plugin in plugins.items():
            source = plugin['source']
            # Remote sources are downloaded by the script itself.
            if source.split('://')[0] in ['http', 'https']:
                continue

            # temporary workaround to resolve absolute file path
            # to installed plugin using internal local workflows storage
            # information
            # NOTE(review): ctx._endpoint is a private API — confirm it is
            # still available in the targeted Cloudify version.
            plugin_path = os.path.join(ctx._endpoint.storage.resources_root,
                                       source)
            with tempfile.TemporaryFile() as fileobj:
                with tarfile.open(fileobj=fileobj, mode='w:gz') as tar:
                    tar.add(plugin_path, arcname=name)
                # Rewind so fabric uploads the archive from the start.
                fileobj.seek(0)
                tar_remote_path = '{0}/{1}.tar.gz'\
                    .format(cloudify_plugins, name)
                fabric.api.put(fileobj, '~/{0}'.format(tar_remote_path))
                plugin['source'] = 'file://$HOME/{0}'.format(tar_remote_path)
        # render script template and copy it to host's home dir
        script_template = ctx.get_resource('components/restservice/'
                                           'scripts/install_plugins.sh')
        script = jinja2.Template(script_template).render(plugins=plugins)
        fabric.api.put(
            local_path=StringIO(script),
            remote_path='~/cloudify/{0}'.format(install_plugins_script))
        # Execute the rendered script
        _run_command('chmod +x ~/cloudify/{0} && ~/cloudify/{0}'.format(
            install_plugins_script))
def _get_script(startup_script):
    """Normalize ``startup_script`` into GCP metadata and append the agent
    install script.

    Plugin versions 1.0.0-1.0.1 accepted either a plain string or a dict
    with ``type``/``script`` keys; 1.1.0 introduced a structure closer to
    the GCP API.  All three shapes are supported here.  Returns a dict with
    ``key`` and ``value`` entries.
    """
    if hasattr(startup_script, 'get'):
        # Dict form: honour the configured metadata key and script source.
        metadata = {'key': startup_script.get('key', 'startup-script')}
        script_type = startup_script.get('type')
        if script_type == 'file':
            metadata['value'] = ctx.get_resource(startup_script.get('script'))
        elif script_type == 'string':
            metadata['value'] = startup_script.get('script')
        else:
            metadata['value'] = startup_script.get('value')
    else:
        # Legacy plain-string form; anything non-text becomes empty.
        legacy_value = startup_script if isinstance(
            startup_script, _compat.text_type) else ''
        metadata = {'key': 'startup-script', 'value': legacy_value}
    install_agent_script = ctx.agent.init_script()
    os_family = ctx.node.properties['os_family']
    if install_agent_script:
        existing_value = metadata['value']
        if metadata.get('key') in POWERSHELL_SCRIPTS and \
                os_family == 'windows':
            # Merge both scripts into one powershell block: strip the agent
            # script's own PS markers and wrap everything once.
            pieces = [PS_OPEN, existing_value]
            pieces += re.split('{0}|{1}'.format(PS_OPEN, PS_CLOSE),
                               install_agent_script)
            pieces.append(PS_CLOSE)
        else:
            pieces = [existing_value, install_agent_script]
        metadata['value'] = '\n'.join(pieces)
    return metadata
def create(instance_type, image_id, name, zone, external_ip, startup_script,
           scopes, user_data, **kwargs):
    """Create a GCP instance from the node's configuration.

    Resolves the startup script (explicit argument or runtime property,
    file or string form), records zone/name runtime properties, and adds
    the instance to the agent security groups when an agent is installed.
    """
    if zone:
        ctx.instance.runtime_properties[constants.GCP_ZONE] = zone
    gcp_config = utils.get_gcp_config()
    gcp_config['network'] = utils.get_gcp_resource_name(gcp_config['network'])
    script = ''
    # Fall back to a script stashed in runtime properties.
    if not startup_script:
        startup_script = ctx.instance.runtime_properties.get('startup_script')
    # TODO: make it pythonistic
    ctx.logger.info('The script is {0}'.format(str(startup_script)))
    if startup_script:
        script_type = startup_script.get('type')
        if script_type == 'file':
            script = ctx.get_resource(startup_script.get('script'))
        elif script_type == 'string':
            script = startup_script.get('script')
    instance_name = utils.get_final_resource_name(name)
    instance = Instance(gcp_config,
                        ctx.logger,
                        name=instance_name,
                        image=image_id,
                        machine_type=instance_type,
                        external_ip=external_ip,
                        startup_script=script,
                        scopes=scopes,
                        user_data=user_data)
    ctx.instance.runtime_properties[constants.NAME] = instance.name
    if ctx.node.properties['install_agent']:
        add_to_security_groups(instance)
    disk = ctx.instance.runtime_properties.get(constants.DISK)
    if disk:
        instance.disks = [disk]
    utils.create(instance)
    set_ip(instance)
def execute_cmd():
    """Run the blueprint-provided Fortinet command file on the target host.

    Fix: dropped the dead locals (``fortinet_user``, ``fortinet_pass``,
    ``fortinet_vdom``, ``running``) — credentials were being copied into
    unused variables.
    """
    ctx.logger.info('Start Main')
    prepare()
    conf_file = ctx.get_resource(CONFIG_FILE_NAME)
    fortinet_host = env.host_string
    ctx.logger.info('Connect to Fortinet: {0}'.format(fortinet_host))
    env.hosts = [fortinet_host]
    ctx.logger.info('execute command')
    run(conf_file)
    ctx.logger.info('Done')
def main():
    """Connect to the Fortinet device and execute the blueprint command file.

    Fixes: removed the unused ``fortinet_vdom`` local, and the session is
    now always closed (try/finally) even when the command fails.
    """
    ctx.logger.info('Start Main')
    prepare()
    conf_file = ctx.get_resource(CONFIG_FILE_NAME)
    fortinet_host = inputs['host_string']
    fortinet_user = inputs['user']
    fortinet_pass = inputs['password']
    install_pyfg()
    d = FortiOS(fortinet_host, username=fortinet_user,
                password=fortinet_pass)
    d.open()
    try:
        ctx.logger.info('Executing Command: {0}'.format(conf_file))
        d.execute_command(conf_file)
    finally:
        # Always release the device session.
        d.close()
def execute_relation(params, template_file, **kwargs):
    """Process *template_file* in a relationship context.

    The source node's properties serve as request properties; the
    processed result is written into the target instance's runtime
    properties.  Plugin exceptions map to recoverable / non-recoverable
    Cloudify errors.
    """
    params = params or {}
    if not template_file:
        ctx.logger.info('Processing finished. No template file provided.')
        return
    ctx.logger.debug('Execute:\n'
                     'params: {}\n'
                     'template_file: {}'.format(params, template_file))
    merged_props = ctx.source.node.properties.copy()
    merged_props.update(params)
    ctx.logger.debug('Runtime properties: {}'.format(merged_props))
    template = ctx.get_resource(template_file)
    request_props = ctx.source.node.properties.copy()
    ctx.logger.debug('request_props: {}'.format(request_props))
    ctx.logger.debug('params: {}'.format(params))
    ctx.logger.debug('template_file: {}'.format(template_file))
    try:
        processed = utility.process(params, template, request_props)
        ctx.target.instance.runtime_properties.update(processed)
    except exceptions.NonRecoverableResponseException as err:
        ctx.logger.debug('--sss--> Nonrecoverable: {}'.format(err))
        raise NonRecoverableError(err)
    except (exceptions.RecoverableResponseException,
            exceptions.RecoverableStatusCodeCodeException) as err:
        ctx.logger.debug('--sss--> Recoverable: {}'.format(err))
        raise RecoverableError(err)
    except Exception as err:
        ctx.logger.info('Exception traceback : {}'.format(
            traceback.format_exc()))
        raise NonRecoverableError(err)
def get_external_resource(config):
    """Resolve ``file_resource`` entries in a cloud-init ``write_files`` list.

    Entries whose ``content`` dict declares ``resource_type:
    file_resource`` have their content replaced with the blueprint
    resource text (rendered when ``template_variables`` are given).  The
    config is returned with substitutions applied in place.
    """
    for entry in config.get('write_files', []):
        # A non-dict entry means the list is not the expected shape; stop
        # scanning entirely (matches the original bail-out behaviour).
        if not isinstance(entry, dict):
            break
        try:
            content = entry.get('content')
            if not isinstance(content, dict):
                continue
            if content.get('resource_type', '') != 'file_resource':
                continue
            resource_name = content.get('resource_name', '')
            template_variables = content.get('template_variables', {})
            if template_variables:
                entry['content'] = ctx.get_resource_and_render(
                    resource_name, template_variables)
            else:
                entry['content'] = ctx.get_resource(resource_name)
        except ValueError:
            ctx.logger.debug('No external resource recognized.')
    return config
def deploy_job(script, inputs, credentials, wm_type, workdir, name,
               logger, skip_cleanup):  # pylint: disable=W0613
    """ Exec a deployment job script that receives SSH credentials as input """
    wm = WorkloadManager.factory(wm_type)
    if not wm:
        raise NonRecoverableError("Workload Manager '" + wm_type +
                                  "' not supported.")
    # Execute the script and manage the output
    client = SshClient(credentials['host'],
                       credentials['user'],
                       credentials['password'],
                       use_login_shell=credentials['login_shell'])
    # Fix: initialise exit_code so a failed script creation returns False
    # instead of raising NameError at the final return.
    exit_code = None
    if wm._create_shell_script(client,
                               name,
                               ctx.get_resource(script),
                               logger,
                               workdir=workdir):
        call = "./" + name
        for dinput in inputs:
            call += ' ' + dinput
        _, exit_code = wm._execute_shell_command(client,
                                                 call,
                                                 workdir=workdir,
                                                 wait_result=True)
        # Fix: '!=' / '==' instead of 'is not' / 'is' — identity tests on
        # ints rely on CPython small-int caching and are not guaranteed.
        if exit_code != 0:
            logger.warning("failed to deploy job: call '" + call +
                           "', exit code " + str(exit_code))
        if not skip_cleanup:
            if not wm._execute_shell_command(
                    client, "rm " + name, workdir=workdir):
                logger.warning("failed removing bootstrap script")
    client.close_connection()
    return exit_code == 0
def _get_task(tasks_file, task_name):
    """Execute ``tasks_file`` in its own namespace and return the callable
    named ``task_name``, raising NonRecoverableError on any failure."""
    ctx.logger.debug('getting tasks file...')
    try:
        code = ctx.get_resource(tasks_file)
    except Exception as exc:
        raise exceptions.NonRecoverableError(
            "Could not get '{0}' ({1}: {2})".format(
                tasks_file, type(exc).__name__, exc))
    globs = exec_env.exec_globals(tasks_file)
    try:
        exec_(code, _globs_=globs)
    except Exception as exc:
        raise exceptions.NonRecoverableError(
            "Could not load '{0}' ({1}: {2})".format(
                tasks_file, type(exc).__name__, exc))
    found = globs.get(task_name)
    if not found:
        raise exceptions.NonRecoverableError(
            "Could not find task '{0}' in '{1}'".format(
                task_name, tasks_file))
    if not callable(found):
        raise exceptions.NonRecoverableError(
            "'{0}' in '{1}' is not callable".format(task_name, tasks_file))
    return found
def gen_xml_template(kwargs, template_params, default_template):
    """Build an XML config string from a template.

    Template precedence: explicit ``template_resource`` (fetched via ctx),
    then ``template_content``, then the packaged
    ``templates/<default_template>.xml``.  ``ctx`` is always available to
    the template; ``template_params`` are merged on top.
    """
    resource = kwargs.get('template_resource')
    content = kwargs.get('template_content')
    if resource:
        content = ctx.get_resource(resource)
    if not (resource or content):
        # Fall back to the template bundled inside this package.
        templates_dir = resource_filename(__name__, 'templates')
        resource = '{}/{}.xml'.format(templates_dir, default_template)
        ctx.logger.info("Will be used internal: %s" % resource)
    if not content:
        with open(resource) as handle:
            content = handle.read()
    render_params = {"ctx": ctx}
    if template_params:
        render_params.update(template_params)
    xmlconfig = filters.render_template(content, render_params)
    ctx.logger.debug(repr(xmlconfig))
    return xmlconfig
def run(**kwargs):
    """main entry point for all calls

    Connects to the first reachable device from ``terminal_auth.ip``
    (falling back to the containing host's ip), executes each entry in
    ``calls`` (an explicit ``action``, a blueprint ``template``, or an
    inline ``template_text``), optionally stores output in runtime
    properties, then closes the session.
    """
    calls = kwargs.get('calls', [])
    if not calls:
        ctx.logger.info("No calls")
        return
    # Resolve node/instance: a NonRecoverableError here means we are in a
    # relationship context, so use the target side instead.
    try:
        ctx_properties = ctx.node.properties
        ctx_instance = ctx.instance
    except cfy_exc.NonRecoverableError:
        # Relationships context?
        ctx_properties = ctx.target.node.properties
        ctx_instance = ctx.target.instance
    # credentials: node properties overridden by operation kwargs
    properties = ctx_properties
    terminal_auth = properties.get('terminal_auth', {})
    terminal_auth.update(kwargs.get('terminal_auth', {}))
    ip_list = terminal_auth.get('ip')
    # if node contained in some other node, try to overwrite ip
    if not ip_list:
        ip_list = [ctx_instance.host_ip]
        ctx.logger.info("Used host from container: %s" % str(ip_list))
    if isinstance(ip_list, basestring):
        ip_list = [ip_list]
    user = terminal_auth.get('user')
    password = terminal_auth.get('password')
    key_content = terminal_auth.get('key_content')
    port = terminal_auth.get('port', 22)
    if not ip_list or not user:
        raise cfy_exc.NonRecoverableError(
            "please check your credentials, ip or user not set")
    # additional settings
    global_promt_check = terminal_auth.get('promt_check')
    global_error_examples = terminal_auth.get('errors')
    exit_command = terminal_auth.get('exit_command', 'exit')
    # save logs to debug file
    log_file_name = None
    if terminal_auth.get('store_logs'):
        log_file_name = "/tmp/terminal-%s_%s_%s.log" % (str(
            ctx.execution_id), str(ctx_instance.id), str(ctx.workflow_id))
        ctx.logger.info(
            "Communication logs will be saved to %s" % log_file_name)
    connection = terminal_connection.connection()
    # Try each candidate ip until one connects; retry the whole operation
    # if none succeeds.
    for ip in ip_list:
        try:
            prompt = connection.connect(ip, user, password, key_content,
                                        port, global_promt_check,
                                        logger=ctx.logger,
                                        log_file_name=log_file_name)
            ctx.logger.info("Will be used: " + ip)
            break
        except Exception as ex:
            ctx.logger.info(
                "Can't connect to:{} with exception:{} and type:{}".format(
                    repr(ip), str(ex), str(type(ex))))
    else:
        raise cfy_exc.OperationRetry(message="Let's try one more time?")
    ctx.logger.info("Device prompt: " + prompt)
    for call in calls:
        responses = call.get('responses', [])
        promt_check = call.get('promt_check', global_promt_check)
        error_examples = call.get('errors', global_error_examples)
        # use action if exist
        operation = call.get('action', "")
        # use template if have
        if not operation and 'template' in call:
            template_name = call.get('template')
            template_params = call.get('params')
            template = ctx.get_resource(template_name)
            if not template:
                ctx.logger.info("Empty template.")
                continue
            template_engine = Template(template)
            if not template_params:
                template_params = {}
            # save context for reuse in template
            template_params['ctx'] = ctx
            operation = template_engine.render(template_params)
        # in case of template_text
        if not operation and 'template_text' in call:
            template_params = call.get('params')
            template = call.get('template_text')
            if not template:
                ctx.logger.info("Empty template_text.")
                continue
            template_engine = Template(template)
            if not template_params:
                template_params = {}
            # save context for reuse in template
            template_params['ctx'] = ctx
            operation = template_engine.render(template_params)
        if not operation:
            continue
        if responses:
            ctx.logger.info("We have predefined responses: " +
                            str(responses))
        ctx.logger.debug("Template: \n" + str(operation))
        result = ""
        # Run the rendered operation line by line on the device.
        for op_line in operation.split("\n"):
            # skip empty lines
            if not op_line.strip():
                continue
            ctx.logger.info("Executing template...")
            ctx.logger.debug("Execute: " + op_line)
            result_part = connection.run(op_line, promt_check,
                                         error_examples, responses)
            if result_part.strip():
                ctx.logger.info(result_part.strip())
            result += (result_part + "\n")
        # save results to runtime properties
        save_to = call.get('save_to')
        if save_to:
            ctx.logger.info("For save: " + result.strip())
            ctx_instance.runtime_properties[save_to] = result.strip()
    # NOTE(review): promt_check / error_examples below carry over from the
    # LAST call iteration — presumably intentional, but confirm.
    while not connection.is_closed() and exit_command:
        ctx.logger.info("Execute close")
        result = connection.run(exit_command, promt_check, error_examples)
        ctx.logger.info("Result of close: " + result)
    connection.close()
def configure(subject=None):
    """Render and install the BIND private and public domain zone files.

    Discovers a floating ip via the containing server's relationships,
    renders both zone templates with the node properties plus backend and
    ip data, installs them under ``/etc/bind``, then reloads BIND.
    """
    subject = subject or ctx
    # Get bind floating IP: walk contained_in -> floating-ip relationships
    # of the hosting server.
    relationships = subject.instance.relationships
    public_ip = ''
    for element in relationships:
        if element.type == 'cloudify.relationships.contained_in':
            for elements in element.target.instance.relationships:
                if elements.type == \
                        'cloudify.openstack.server_connected_to_floating_ip':
                    public_ip = elements.target.instance.runtime_properties[
                        'floating_ip_address']
    ctx.logger.info('Creating private domain file')
    template = Template(ctx.get_resource(TEMPLATE_RESOURCE_NAME_PRIVATE))
    PRIVATE_DOMAIN = subject.node.properties['private_domain']
    CONFIG_PATH_PRIVATE = '/etc/bind/db.{0}'.format(PRIVATE_DOMAIN)
    ctx.logger.debug('Building a dict object that will contain variables '
                     'to write to the Jinja2 template.')
    config = subject.node.properties.copy()
    config.update(
        dict(backends=subject.instance.runtime_properties.get('backends',
                                                              {}),
             host_ip=subject.instance.host_ip,
             public_ip=public_ip))
    # Generate private domain file from jinja template
    ctx.logger.debug(
        'Rendering the Jinja2 template to {0}.'.format(CONFIG_PATH_PRIVATE))
    ctx.logger.debug('The config dict: {0}.'.format(config))
    with tempfile.NamedTemporaryFile(delete=False) as temp_config:
        temp_config.write(template.render(config))
    _run('sudo mv {0} {1}'.format(temp_config.name, CONFIG_PATH_PRIVATE),
         error_message='Failed to write to {0}.'.format(CONFIG_PATH_PRIVATE))
    _run('sudo chmod 644 {0}'.format(CONFIG_PATH_PRIVATE),
         error_message='Failed to change permissions {0}.'.format(
             CONFIG_PATH_PRIVATE))
    ctx.logger.info('Creating public domain file')
    PUBLIC_DOMAIN = subject.node.properties['public_domain']
    CONFIG_PATH_PUBLIC = '/etc/bind/db.{0}'.format(PUBLIC_DOMAIN)
    ctx.logger.debug(
        'Rendering the Jinja2 template to {0}.'.format(CONFIG_PATH_PUBLIC))
    ctx.logger.debug('The config dict: {0}.'.format(config))
    template = Template(ctx.get_resource(TEMPLATE_RESOURCE_NAME_PUBLIC))
    # Generate public domain file from jinja template
    with tempfile.NamedTemporaryFile(delete=False) as temp_config:
        temp_config.write(template.render(config))
    _run('sudo mv {0} {1}'.format(temp_config.name, CONFIG_PATH_PUBLIC),
         error_message='Failed to write to {0}.'.format(CONFIG_PATH_PUBLIC))
    _run('sudo chmod 644 {0}'.format(CONFIG_PATH_PUBLIC),
         error_message='Failed to change permissions {0}.'.format(
             CONFIG_PATH_PUBLIC))
    # Reload bind server to reload new domain configuration
    reload()
def install(subject=None):
    """Install BIND 9 and write its base configuration files.

    Installs the packages, renders the named.conf.local, named options,
    and clearwater-etcd shared_config templates from the node properties,
    installs each under its target path, then runs :func:`configure`.
    """
    subject = subject or ctx
    # Install bind server and dependancies
    ctx.logger.debug('Installing BIND DNS server')
    _run('sudo apt-get update',
         error_message='Failed to update package lists')
    _run('sudo DEBIAN_FRONTEND=noninteractive apt-get install bind9 --yes',
         error_message='Failed to install BIND packages')
    # Generate bind config files from jinja template
    template = Template(ctx.get_resource(TEMPLATE_RESOURCE_NAME_LOCAL_CONF))
    config = subject.node.properties.copy()
    config.update(
        dict(name='bind',
             host_ip=subject.instance.host_ip,
             etcd_ip=subject.instance.host_ip))
    ctx.logger.debug(
        'Rendering the Jinja2 template to {0}.'.format(
            CONFIG_PATH_LOCAL_CONF))
    ctx.logger.debug('The config dict: {0}.'.format(config))
    with tempfile.NamedTemporaryFile(delete=False) as temp_config:
        temp_config.write(template.render(config))
    _run('sudo mkdir -p /etc/clearwater',
         error_message='Failed to create clearwater config directory.')
    _run(
        'sudo mv {0} {1}'.format(temp_config.name, CONFIG_PATH_LOCAL_CONF),
        error_message='Failed to write to {0}.'.format(
            CONFIG_PATH_LOCAL_CONF))
    _run('sudo chmod 644 {0}'.format(CONFIG_PATH_LOCAL_CONF),
         error_message='Failed to change permissions {0}.'.format(
             CONFIG_PATH_LOCAL_CONF))
    ctx.logger.debug(
        'Rendering the Jinja2 template to {0}.'.format(CONFIG_PATH_NAMED))
    ctx.logger.debug('The config dict: {0}.'.format(config))
    template = Template(ctx.get_resource(TEMPLATE_RESOURCE_NAME_NAMED))
    with tempfile.NamedTemporaryFile(delete=False) as temp_config:
        temp_config.write(template.render(config))
    _run('sudo mv {0} {1}'.format(temp_config.name, CONFIG_PATH_NAMED),
         error_message='Failed to write to {0}.'.format(CONFIG_PATH_NAMED))
    _run('sudo chmod 644 {0}'.format(CONFIG_PATH_NAMED),
         error_message='Failed to change permissions {0}.'.format(
             CONFIG_PATH_NAMED))
    # Generate shared_config file for clearwater-etcd software
    template = Template(ctx.get_resource(TEMPLATE_RESOURCE_NAME_ETCD))
    ctx.logger.debug(
        'Rendering the Jinja2 template to {0}.'.format(CONFIG_PATH_ETCD))
    ctx.logger.debug('The config dict: {0}.'.format(config))
    with tempfile.NamedTemporaryFile(delete=False) as temp_config:
        temp_config.write(template.render(config))
    _run('sudo mv {0} {1}'.format(temp_config.name, CONFIG_PATH_ETCD),
         error_message='Failed to write to {0}.'.format(CONFIG_PATH_ETCD))
    _run('sudo chmod 644 {0}'.format(CONFIG_PATH_ETCD),
         error_message='Failed to change permissions {0}.'.format(
             CONFIG_PATH_ETCD))
    # NOTE(review): passes subject=None, so configure() falls back to ctx
    # even when a subject was supplied here — presumably it should forward
    # `subject`; confirm before changing.
    configure(subject=None)
def install(subject=None):
    """Install BIND 9 and write its base configuration files.

    Installs the packages, renders the named.conf.local, named options,
    and clearwater-etcd shared_config templates from the node properties,
    installs each under its target path, then runs :func:`configure`.
    """
    subject = subject or ctx
    # Install bind server and dependancies
    ctx.logger.debug('Installing BIND DNS server')
    _run('sudo apt-get update',
         error_message='Failed to update package lists')
    _run('sudo DEBIAN_FRONTEND=noninteractive apt-get install bind9 --yes',
         error_message='Failed to install BIND packages')
    # Generate bind config files from jinja template
    template = Template(ctx.get_resource(TEMPLATE_RESOURCE_NAME_LOCAL_CONF))
    config = subject.node.properties.copy()
    config.update(dict(
        name='bind',
        host_ip=subject.instance.host_ip,
        etcd_ip=subject.instance.host_ip))
    ctx.logger.debug('Rendering the Jinja2 template to {0}.'.format(CONFIG_PATH_LOCAL_CONF))
    ctx.logger.debug('The config dict: {0}.'.format(config))
    with tempfile.NamedTemporaryFile(delete=False) as temp_config:
        temp_config.write(template.render(config))
    _run('sudo mkdir -p /etc/clearwater',
         error_message='Failed to create clearwater config directory.')
    _run('sudo mv {0} {1}'.format(temp_config.name, CONFIG_PATH_LOCAL_CONF),
         error_message='Failed to write to {0}.'.format(CONFIG_PATH_LOCAL_CONF))
    _run('sudo chmod 644 {0}'.format(CONFIG_PATH_LOCAL_CONF),
         error_message='Failed to change permissions {0}.'.format(CONFIG_PATH_LOCAL_CONF))
    ctx.logger.debug('Rendering the Jinja2 template to {0}.'.format(CONFIG_PATH_NAMED))
    ctx.logger.debug('The config dict: {0}.'.format(config))
    template = Template(ctx.get_resource(TEMPLATE_RESOURCE_NAME_NAMED))
    with tempfile.NamedTemporaryFile(delete=False) as temp_config:
        temp_config.write(template.render(config))
    _run('sudo mv {0} {1}'.format(temp_config.name, CONFIG_PATH_NAMED),
         error_message='Failed to write to {0}.'.format(CONFIG_PATH_NAMED))
    _run('sudo chmod 644 {0}'.format(CONFIG_PATH_NAMED),
         error_message='Failed to change permissions {0}.'.format(CONFIG_PATH_NAMED))
    # Generate shared_config file for clearwater-etcd software
    template = Template(ctx.get_resource(TEMPLATE_RESOURCE_NAME_ETCD))
    ctx.logger.debug('Rendering the Jinja2 template to {0}.'.format(CONFIG_PATH_ETCD))
    ctx.logger.debug('The config dict: {0}.'.format(config))
    with tempfile.NamedTemporaryFile(delete=False) as temp_config:
        temp_config.write(template.render(config))
    _run('sudo mv {0} {1}'.format(temp_config.name, CONFIG_PATH_ETCD),
         error_message='Failed to write to {0}.'.format(CONFIG_PATH_ETCD))
    _run('sudo chmod 644 {0}'.format(CONFIG_PATH_ETCD),
         error_message='Failed to change permissions {0}.'.format(CONFIG_PATH_ETCD))
    # NOTE(review): passes subject=None, so configure() falls back to ctx
    # even when a subject was supplied here — presumably it should forward
    # `subject`; confirm before changing.
    configure(subject=None)
def create(instance_type, image_id, name, external_ip, startup_script,
           scopes, tags, zone=None, can_ip_forward=False,
           additional_settings=None, **kwargs):
    """Create a GCP instance for this node.

    Resolves the startup script (file or string form), picks a zone
    (explicit argument, previously stored runtime property, subnetwork's
    region, or the gcp_config default — in that order), then builds and
    creates the Instance.
    """
    props = ctx.instance.runtime_properties
    gcp_config = utils.get_gcp_config()
    script = ''
    if startup_script:
        if startup_script.get('type') == 'file':
            # Script body ships as a blueprint resource.
            script = ctx.get_resource(startup_script.get('script'))
        elif startup_script.get('type') == 'string':
            script = startup_script.get('script')
        else:
            raise NonRecoverableError('invalid script type: {}'.format(
                startup_script.get('type')))
    ctx.logger.info('The script is {0}'.format(str(startup_script)))
    ssh_keys = get_ssh_keys()
    network, subnetwork = utils.get_net_and_subnet(ctx)
    # Zone precedence: explicit arg > stored runtime property >
    # random zone of the subnetwork's region > gcp_config default.
    if zone:
        zone = props['zone'] = utils.get_gcp_resource_name(zone)
    else:
        if props.get('zone', False):
            zone = props['zone']
        elif subnetwork:
            zone = props['zone'] = random.choice(
                constants.REGION_ZONES_FULL[basename(
                    utils.get_network_node(
                        ctx).instance.runtime_properties['region'])])
        else:
            zone = props['zone'] = utils.get_gcp_resource_name(
                gcp_config['zone'])
    instance_name = utils.get_final_resource_name(name)
    instance = Instance(
        gcp_config,
        ctx.logger,
        name=instance_name,
        image=image_id,
        machine_type=instance_type,
        external_ip=external_ip,
        startup_script=script,
        scopes=scopes,
        tags=tags,
        ssh_keys=ssh_keys,
        network=network,
        subnetwork=subnetwork,
        zone=zone,
        can_ip_forward=can_ip_forward,
        additional_settings=additional_settings,
    )
    utils.create(instance)
def create(fco_api, *args, **kwargs):
    """Create (or adopt) an FCO server, attach a NIC and SSH keys, boot it,
    and copy the node's private keys onto it over SSH.

    :param fco_api: authenticated FCO API client.
    :returns: tuple ``(server_uuid, server_ip, username, password)``; the
        same values (plus disk and NIC UUID lists) are mirrored into the
        instance runtime properties.
    :raises Exception: on any provisioning step that fails or times out.
    """
    ctx.logger.info('starting server creation')
    # Ease of access
    _rp = ctx.instance.runtime_properties
    _np = ctx.node.properties
    # Check if existing server is to be used
    if _np[PROP_USE_EXISTING]:
        # BUG FIX: was get_resource(fco_api, _np[PROP_RESOURCE_ID, RT.SERVER])
        # which indexed the properties dict with a tuple and dropped the
        # resource-type argument entirely.
        server = get_resource(fco_api, _np[PROP_RESOURCE_ID], RT.SERVER)
        if not server.nics:
            raise Exception('No NICs attached to server')
        _rp[RPROP_UUID] = server.resourceUUID
        _rp[RPROP_DISKS] = [d.resourceUUID for d in server.disks]
        _rp[RPROP_NIC] = server.nics[0].resourceUUID
        _rp[RPROP_NICS] = [n.resourceUUID for n in server.nics]
        _rp[RPROP_IP] = server.nics[0].ipAddresses[0].ipAddress
        _rp[RPROP_USER] = server.initialUser
        _rp[RPROP_PASS] = server.initialPassword
        return (_rp[RPROP_UUID], _rp[RPROP_IP], _rp[RPROP_USER],
                _rp[RPROP_PASS])
    # Get configuration
    image = get_resource(fco_api, _np[PROP_IMAGE], RT.IMAGE)
    # BUG FIX: the guard tested PROP_IMAGE (always truthy here -- the image
    # was just fetched unconditionally); it must test whether a VDC was
    # configured, matching the vdc/image fallback below.
    if _np[PROP_VDC]:
        vdc = get_resource(fco_api, _np[PROP_VDC], RT.VDC)
    else:
        vdc = None
    network = get_resource(fco_api, _np[PROP_NET], RT.NETWORK)
    server_po = get_resource(fco_api, _np[PROP_SERVER_PO], RT.PRODUCTOFFER)
    manager_key = get_resource(fco_api, _np[PROP_MANAGER_KEY], RT.SSHKEY)
    cpu_count = _np[PROP_CPU_COUNT]
    ram_amount = _np[PROP_RAM_AMOUNT]
    public_keys = _np[PROP_PUBLIC_KEYS] or []
    private_keys = _np[PROP_PRIVATE_KEYS] or []
    # Verify existence of private keys in the blueprint before touching the
    # cloud; 404/403 are collected and reported together, anything else is
    # re-raised as-is.
    missing_keys = set()
    bad_permission_keys = set()
    key_contents = {}
    for key in private_keys:
        try:
            key_contents[key] = ctx.get_resource(os.path.expanduser(key))
        except NonRecoverableError as e:
            if 'HttpException: 404' in str(e):
                missing_keys.add(key)
            elif 'HttpException: 403' in str(e):
                bad_permission_keys.add(key)
            else:
                raise
    if missing_keys or bad_permission_keys:
        raise Exception('Missing private keys: {}\nBad permission keys: {}'
                        .format(missing_keys, bad_permission_keys))
    # Generate missing configuration
    image_uuid = image.resourceUUID
    if vdc is not None:
        cluster_uuid = vdc.clusterUUID
        vdc_uuid = vdc.resourceUUID
    else:
        # No VDC configured: inherit cluster/VDC from the image.
        cluster_uuid = image.clusterUUID
        vdc_uuid = image.vdcUUID
    network_uuid = network.resourceUUID
    network_type = network.networkType
    server_po_uuid = server_po.resourceUUID
    manager_key_uuid = manager_key.resourceUUID
    # TODO: better way of determining suitable disk
    boot_disk_po_uuid = get_resource(fco_api,
                                     '{} GB Storage Disk'.format(image.size),
                                     RT.PRODUCTOFFER).resourceUUID
    ctx.logger.info('Configuration: \n'
                    'image_uuid: %s\n'
                    'cluster_uuid: %s\n'
                    'vdc_uuid: %s\n'
                    'network_uuid: %s\n'
                    'server_po_uuid: %s\n'
                    'manager_key_uuid: %s\n'
                    'boot_disk_po_uuid: %s',
                    image_uuid, cluster_uuid, vdc_uuid, network_uuid,
                    server_po_uuid, manager_key_uuid, boot_disk_po_uuid)
    # Create server (idempotent: reuse the UUID from a previous attempt if
    # it was already stored in runtime properties).
    server_name = '{}{}_{}'.format(ctx.bootstrap_context.resources_prefix,
                                   ctx.deployment.id, ctx.instance.id)
    try:
        server_uuid = _rp[RPROP_UUID]
    except KeyError:
        server_uuid = create_server(fco_api, server_po_uuid, image_uuid,
                                    cluster_uuid, vdc_uuid, cpu_count,
                                    ram_amount, boot_disk_po_uuid,
                                    [manager_key], server_name)
        _rp[RPROP_UUID] = server_uuid
    ctx.logger.info('server_uuid: %s', server_uuid)
    server = get_resource(fco_api, server_uuid, RT.SERVER)
    server_nics = [nic.resourceUUID for nic in server.nics]
    server_keys = [key.resourceUUID for key in server.sshkeys]
    # Wait for server to be active
    if not wait_for_state(fco_api, server_uuid, enums.ResourceState.ACTIVE,
                          RT.SERVER):
        raise Exception('Server failed to prepare in time!')
    ctx.logger.info('Server ACTIVE')
    # Add any public keys not already attached to the server.
    new_keys = set()
    for key in public_keys:
        if key not in server_keys:
            key_uuid = create_ssh_key(fco_api, key, server_name + ' Key')
            attach_ssh_key(fco_api, server_uuid, key_uuid)
            new_keys.add(key_uuid)
    ctx.logger.info('Keys attached: %s', new_keys)
    # Create NIC (idempotent via runtime properties, as above).
    try:
        nic_uuid = _rp[RPROP_NIC]
    except KeyError:
        nic_uuid = create_nic(fco_api, cluster_uuid, network_type,
                              network_uuid, vdc_uuid, server_name + ' NIC')
        if not wait_for_state(fco_api, nic_uuid, enums.ResourceState.ACTIVE,
                              RT.NIC):
            raise Exception('NIC failed to create in time!')
        _rp[RPROP_NIC] = nic_uuid
    ctx.logger.info('nic_uuid: %s', nic_uuid)
    # Stop server if started -- NICs can only be attached while stopped.
    if get_server_status(fco_api, server_uuid) != enums.ServerStatus.STOPPED:
        if not stop_server(fco_api, server_uuid):
            raise Exception('Stopping server failed to complete in time!')
        ctx.logger.info('Server STOPPED')
    # Attach NIC
    if nic_uuid not in server_nics:
        job_uuid = attach_nic(fco_api, server_uuid, nic_uuid, 1).resourceUUID
        cond = cobjects.Job.status == enums.JobStatus.SUCCESSFUL
        if not wait_for_cond(fco_api, job_uuid, cond, RT.JOB):
            raise Exception('Attaching NIC failed to complete in time!')
        ctx.logger.info('NICs attached')
    else:
        ctx.logger.info('NICs already attached')
    # Start server if not started
    if get_server_status(fco_api, server_uuid) == enums.ServerStatus.STOPPED:
        if not start_server(fco_api, server_uuid):
            raise Exception('Running server failed to complete in time!')
        ctx.logger.info('Server RUNNING')
    nic = get_resource(fco_api, nic_uuid, RT.NIC)
    server_ip = nic.ipAddresses[0].ipAddress
    ctx.logger.info('Server READY')
    username = server.initialUser
    password = server.initialPassword
    # Negative sentinel: stays truthy forever, i.e. retry until success.
    ssh_attempts = -1
    # Copy each private key into ~/.ssh on the new server via Fabric.
    while ssh_attempts:
        ctx.logger.info('Attempting to SSH ({})'.format(ssh_attempts))
        try:
            # BUG FIX: host_string was server_po_uuid (a product-offer UUID);
            # the SSH target is the server's IP address.
            with settings(host_string=server_ip, user=username,
                          password=password, disable_known_hosts=True,
                          abort_exception=Exception):
                run('mkdir ~/.ssh')
                run('chmod 0700 ~/.ssh')
                for key, key_content in key_contents.items():
                    remote = os.path.join('~', '.ssh', os.path.basename(key))
                    run('echo \'{}\' > {}'.format(key_content, remote))
                    run('chmod 0600 ' + remote)
                ctx.logger.info('Done')
                break
        except Exception as e:
            ctx.logger.info(e)
            ssh_attempts -= 1
    else:
        raise Exception('Failed to provision keys in time')
    # Persist the final server facts for other operations/relationships.
    _rp[RPROP_UUID] = server_uuid
    _rp[RPROP_IP] = server_ip
    _rp[RPROP_USER] = username
    _rp[RPROP_PASS] = password
    server = get_resource(fco_api, server_uuid, RT.SERVER)
    _rp[RPROP_DISKS] = [d.resourceUUID for d in server.disks]
    _rp[RPROP_NICS] = [n.resourceUUID for n in server.nics]
    ctx.logger.info('Server IP: ' + server_ip)
    # Reconstructed from a corrupted line in the original source.
    ctx.logger.info('Server User: ' + username)
    # NOTE(review): logging a credential is a security liability; kept to
    # preserve the original behavior, but consider removing.
    ctx.logger.info('Server Password: ' + password)
    return server_uuid, server_ip, username, password
def configure(subject=None):
    """Write the clearwater local_config and nameserver config files.

    Walks this instance's relationships to find the host's floating IP and
    the 'bind' node's host IP, renders two Jinja2 templates with them, and
    installs the results via sudo.

    :param subject: context-like object for node properties / host IP;
        defaults to the operation ``ctx``.
    """
    subject = subject or ctx
    ctx.logger.info('Configuring clearwater node.')
    template = Template(ctx.get_resource(TEMPLATE_RESOURCE_NAME))
    ctx.logger.debug('Building a dict object that will contain variables '
                     'to write to the Jinja2 template.')
    # Get the host public IP: follow contained_in to the host, then look for
    # a floating-ip relationship on that host.
    name = ctx.instance.id
    relationships = ctx.instance.relationships
    public_ip = ''
    for element in relationships:
        if element.type == 'cloudify.relationships.contained_in':
            for elements in element.target.instance.relationships:
                if elements.type == 'cloudify.openstack.server_connected_to_floating_ip':
                    public_ip = elements.target.instance.runtime_properties['floating_ip_address']
    # Get bind host IP: any related instance whose id starts with 'bind_'.
    binds = []
    for element in relationships:
        text = element.target.instance.id
        if re.split(r'_', text)[0] == 'bind':
            binds.append(element.target.instance.host_ip)
    config = subject.node.properties.copy()
    # NOTE(review): binds[0] raises IndexError when no bind node is related
    # -- assumes the blueprint always provides one; confirm.
    config.update(dict(
        name=name.replace('_', '-'),
        host_ip=subject.instance.host_ip,
        etcd_ip=binds[0],
        public_ip=public_ip))
    ctx.logger.debug('Rendering the Jinja2 template to {0}.'.format(CONFIG_PATH))
    ctx.logger.debug('The config dict: {0}.'.format(config))
    # Generate local_config file from jinja template
    with tempfile.NamedTemporaryFile(delete=False) as temp_config:
        temp_config.write(template.render(config))
    _run('sudo mkdir -p /etc/clearwater',
         error_message='Failed to create clearwater config directory.')
    _run('sudo mv {0} {1}'.format(temp_config.name, CONFIG_PATH),
         error_message='Failed to write to {0}.'.format(CONFIG_PATH))
    _run('sudo chmod 644 {0}'.format(CONFIG_PATH),
         error_message='Failed to change permissions {0}.'.format(CONFIG_PATH))
    template = Template(ctx.get_resource(TEMPLATE_RESOURCE_NAME_NAMESERVER))
    config = subject.node.properties.copy()
    config.update(dict(binds=binds))
    # Generate dnsmasq file from jinja template
    with tempfile.NamedTemporaryFile(delete=False) as temp_config:
        temp_config.write(template.render(config))
    _run('sudo mv {0} {1}'.format(temp_config.name, CONFIG_PATH_NAMESERVER),
         error_message='Failed to write to {0}.'.format(CONFIG_PATH_NAMESERVER))
    _run('sudo chmod 644 {0}'.format(CONFIG_PATH_NAMESERVER),
         error_message='Failed to change permissions {0}.'.format(CONFIG_PATH_NAMESERVER))
def configure(subject=None):
    """Write the BIND private- and public-domain zone files and reload BIND.

    Renders /etc/bind/db.<private_domain> and /etc/bind/db.<public_domain>
    from Jinja2 templates using the node's backends, host IP and (when a
    floating-ip relationship exists) public IP.

    :param subject: context-like object for node properties / host IP;
        defaults to the operation ``ctx``.
    """
    subject = subject or ctx
    # Get bind floating IP: follow contained_in to the host, then look for a
    # floating-ip relationship on that host.
    relationships = subject.instance.relationships
    public_ip = ''
    for element in relationships:
        if element.type == 'cloudify.relationships.contained_in':
            for elements in element.target.instance.relationships:
                if elements.type == 'cloudify.openstack.server_connected_to_floating_ip':
                    public_ip = elements.target.instance.runtime_properties['floating_ip_address']
    ctx.logger.info('Creating private domain file')
    template = Template(ctx.get_resource(TEMPLATE_RESOURCE_NAME_PRIVATE))
    PRIVATE_DOMAIN = subject.node.properties['private_domain']
    CONFIG_PATH_PRIVATE = '/etc/bind/db.{0}'.format(PRIVATE_DOMAIN)
    ctx.logger.debug('Building a dict object that will contain variables '
                     'to write to the Jinja2 template.')
    # The same config dict feeds both domain templates.
    config = subject.node.properties.copy()
    config.update(dict(
        backends=subject.instance.runtime_properties.get('backends', {}),
        host_ip=subject.instance.host_ip,
        public_ip=public_ip))
    # Generate private domain file from jinja template
    ctx.logger.debug('Rendering the Jinja2 template to {0}.'.format(CONFIG_PATH_PRIVATE))
    ctx.logger.debug('The config dict: {0}.'.format(config))
    with tempfile.NamedTemporaryFile(delete=False) as temp_config:
        temp_config.write(template.render(config))
    _run('sudo mv {0} {1}'.format(temp_config.name, CONFIG_PATH_PRIVATE),
         error_message='Failed to write to {0}.'.format(CONFIG_PATH_PRIVATE))
    _run('sudo chmod 644 {0}'.format(CONFIG_PATH_PRIVATE),
         error_message='Failed to change permissions {0}.'.format(CONFIG_PATH_PRIVATE))
    ctx.logger.info('Creating public domain file')
    PUBLIC_DOMAIN = subject.node.properties['public_domain']
    CONFIG_PATH_PUBLIC = '/etc/bind/db.{0}'.format(PUBLIC_DOMAIN)
    ctx.logger.debug('Rendering the Jinja2 template to {0}.'.format(CONFIG_PATH_PUBLIC))
    ctx.logger.debug('The config dict: {0}.'.format(config))
    template = Template(ctx.get_resource(TEMPLATE_RESOURCE_NAME_PUBLIC))
    # Generate public domain file from jinja template
    with tempfile.NamedTemporaryFile(delete=False) as temp_config:
        temp_config.write(template.render(config))
    _run('sudo mv {0} {1}'.format(temp_config.name, CONFIG_PATH_PUBLIC),
         error_message='Failed to write to {0}.'.format(CONFIG_PATH_PUBLIC))
    _run('sudo chmod 644 {0}'.format(CONFIG_PATH_PUBLIC),
         error_message='Failed to change permissions {0}.'.format(CONFIG_PATH_PUBLIC))
    # Reload bind server to reload new domain configuration.  NOTE: reload()
    # here is a sibling function in this module (it shadows the Python 2
    # builtin of the same name).
    reload()
# Fragment: this del belongs to a loop whose header lies outside this chunk
# (the complete form appears later as
# `for trigger_type_name in triggers_to_remove: del ...`).
del policy_triggers[trigger_type_name]


def _process_source(source):
    """Resolve *source* to text content.

    Tries, in order: http(s) URL, file:// path, blueprint resource,
    manager resource.

    :param source: URL-like string, optionally prefixed with a schema.
    :raises NonRecoverableError: when no strategy can fetch the source.
    """
    split = source.split('://')
    schema = split[0]
    # NOTE(review): ''.join drops the '://' separator from any remainder
    # that itself contained '://'; likely should be '://'.join -- confirm.
    the_rest = ''.join(split[1:])
    try:
        if schema in ['http', 'https']:
            return requests.get(source).text
        elif schema == 'file' and the_rest:
            with open(the_rest) as f:
                return f.read()
    except IOError, e:
        raise NonRecoverableError('Failed processing source: {} ({})'
                                  .format(source, e.message))
    try:
        # try downloading blueprint resource
        return ctx.get_resource(source)
    except HttpException:
        pass
    try:
        # try downloading cloudify resource
        return get_resource_from_manager(source)
    except HttpException:
        pass
    raise NonRecoverableError('Failed processing source: {}'
                              .format(source))
#!/usr/bin/python
from cloudify import ctx

# Read the blueprint resource named by the 'index_file_path' node property
# and expose its decoded text as the 'index_content' runtime property.
index_path = ctx.node.properties.get('index_file_path')
raw_content = ctx.get_resource(index_path)
ctx.instance.runtime_properties['index_content'] = raw_content.decode()
for trigger_type_name in triggers_to_remove: del policy_triggers[trigger_type_name] def _process_source(source): split = source.split('://') schema = split[0] the_rest = ''.join(split[1:]) try: if schema in ['http', 'https']: return requests.get(source).text elif schema == 'file' and the_rest: with open(the_rest) as f: return f.read() except IOError, e: raise NonRecoverableError('Failed processing source: {} ({})'.format( source, e.message)) try: # try downloading blueprint resource return ctx.get_resource(source) except HttpException: pass try: # try downloading cloudify resource return get_resource_from_manager(source) except HttpException: pass raise NonRecoverableError('Failed processing source: {}'.format(source))
def run(**kwargs):
    """main entry point for all calls

    Connects to a NETCONF device (first reachable IP from the credentials),
    then executes either explicit 'calls' or rendered 'template' chunks,
    with optional lock/unlock and candidate/running datastore copy around
    the operations.

    :raises cfy_exc.NonRecoverableError: on missing credentials or when no
        device in the IP list is reachable.
    """
    calls = kwargs.get('calls', [])
    template = kwargs.get('template')
    templates = []
    if template:
        # Templates are concatenated NETCONF 1.1 chunks separated by the
        # end-of-message marker.
        templates = ctx.get_resource(template).split("]]>]]>")
    if not calls and not templates:
        ctx.logger.info("Please provide calls or template")
        return
    # credentials: node properties, overridable per-call via kwargs
    properties = ctx.node.properties
    netconf_auth = properties.get('netconf_auth', {})
    netconf_auth.update(kwargs.get('netconf_auth', {}))
    user = netconf_auth.get('user')
    password = netconf_auth.get('password')
    key_content = netconf_auth.get('key_content')
    port = int(netconf_auth.get('port', 830))
    ip_list = netconf_auth.get('ip')
    # A single IP may be given as a plain string (Python 2: basestring).
    if isinstance(ip_list, basestring):
        ip_list = [ip_list]
    # save logs to debug file
    log_file_name = None
    if netconf_auth.get('store_logs'):
        log_file_name = "/tmp/netconf-%s_%s_%s.log" % (str(
            ctx.execution_id), str(ctx.instance.id), str(ctx.workflow_id))
        ctx.logger.info("Communication logs will be saved to %s" % log_file_name)
    # if node contained in some other node, try to overwrite ip
    if not ip_list:
        ip_list = [ctx.instance.host_ip]
        ctx.logger.info("Used host from container: %s" % str(ip_list))
    # check minimal amount of credentials
    if not port or not ip_list or not user or (not password and not key_content):
        raise cfy_exc.NonRecoverableError("please check your credentials")
    # some random initial message id, to differ between calls
    message_id = int((time.time() * 100) % 100 * 1000)
    # xml namespaces and capabilities
    xmlns = properties.get('metadata', {}).get('xmlns', {})
    # override by system namespaces
    xmlns = _merge_ns(xmlns, properties.get('base_xmlns', {}))
    netconf_namespace, xmlns = utils.update_xmlns(xmlns)
    capabilities = properties.get('metadata', {}).get('capabilities')
    # connect: try each IP until one accepts the hello exchange
    ctx.logger.info("use %s@%s:%s for login" % (user, ip_list, port))
    hello_string = _generate_hello(xmlns, netconf_namespace, capabilities)
    ctx.logger.info("i sent: " + hello_string)
    _write_to_log(log_file_name, hello_string)
    netconf = netconf_connection.connection()
    for ip in ip_list:
        try:
            capabilities = netconf.connect(ip, user, hello_string, password,
                                           key_content, port)
            ctx.logger.info("Will be used: " + ip)
            break
        except Exception as ex:
            ctx.logger.info("Can't connect to %s with %s" % (repr(ip), str(ex)))
    else:
        # No break: every IP failed.
        raise cfy_exc.NonRecoverableError("please check your ip list")
    ctx.logger.info("i recieved: " + capabilities)
    _write_to_log(log_file_name, capabilities)
    # Negotiate protocol version from the device's advertised capabilities.
    if _server_support_1_1(xmlns, netconf_namespace, capabilities):
        ctx.logger.info("i will use version 1.1 of netconf protocol")
        netconf.current_level = netconf_connection.NETCONF_1_1_CAPABILITY
    else:
        ctx.logger.info("i will use version 1.0 of netconf protocol")
    strict_check = kwargs.get('strict_check', True)
    # Optional: lock datastores before operating.
    if 'lock' in kwargs:
        message_id = message_id + 1
        for name in kwargs['lock']:
            _lock(name, True, netconf, message_id, netconf_namespace, xmlns,
                  strict_check, log_file_name)
    # Optional: snapshot front database into the back (candidate) database.
    if 'back_database' in kwargs and 'front_database' in kwargs:
        message_id = message_id + 1
        _copy(kwargs['front_database'], kwargs['back_database'], netconf,
              message_id, netconf_namespace, xmlns, strict_check,
              log_file_name=log_file_name)
    if calls:
        dsdl = properties.get('metadata', {}).get('dsdl')
        _run_calls(netconf, message_id, netconf_namespace, xmlns, calls,
                   kwargs.get('back_database'), dsdl, strict_check,
                   log_file_name=log_file_name)
    elif templates:
        template_params = kwargs.get('params')
        deep_error_check = kwargs.get('deep_error_check')
        ctx.logger.info("Params for template %s" % str(template_params))
        _run_templates(netconf, templates, template_params, netconf_namespace,
                       xmlns, strict_check, deep_error_check,
                       log_file_name=log_file_name)
    # Mirror of the setup above: copy back and unlock.
    if 'back_database' in kwargs and 'front_database' in kwargs:
        message_id = message_id + 1
        _copy(kwargs['back_database'], kwargs['front_database'], netconf,
              message_id, netconf_namespace, xmlns, strict_check,
              log_file_name=log_file_name)
    if 'lock' in kwargs:
        message_id = message_id + 1
        for name in kwargs['lock']:
            _lock(name, False, netconf, message_id, netconf_namespace, xmlns,
                  strict_check, log_file_name=log_file_name)
    # goodbye
    ctx.logger.info("connection close")
    message_id = message_id + 1
    goodbye_string = _generate_goodbye(xmlns, netconf_namespace, message_id)
    ctx.logger.info("i sent: " + goodbye_string)
    response = netconf.close(goodbye_string)
    ctx.logger.info("i recieved: " + response)
def create_keypair(keypair):
    """Provision *keypair*: load key material from blueprint resources when
    an external resource should be used, otherwise create a new keypair."""
    if not utils.should_use_external_resource():
        keypair.create()
        return
    # External resource: both halves of the key ship with the blueprint.
    keypair.private_key = ctx.get_resource(keypair.private_key_path)
    keypair.public_key = ctx.get_resource(keypair.public_key_path)
def run(**kwargs):
    """main entry point for all calls

    Opens a terminal (SSH) session and executes each entry of 'calls':
    either a literal 'action', a blueprint 'template', or inline
    'template_text' rendered via Jinja2; results can be stored in runtime
    properties via 'save_to'.

    NOTE: 'promt_check' (sic) is the established key name in the node
    properties/API and must not be renamed here.

    :raises cfy_exc.NonRecoverableError: when ip or user is missing.
    """
    calls = kwargs.get('calls', [])
    if not calls:
        ctx.logger.info("No calls")
        return
    # credentials: node properties, overridable per-call via kwargs
    properties = ctx.node.properties
    terminal_auth = properties.get('terminal_auth', {})
    terminal_auth.update(kwargs.get('terminal_auth', {}))
    ip = terminal_auth.get('ip')
    user = terminal_auth.get('user')
    password = terminal_auth.get('password')
    key_content = terminal_auth.get('key_content')
    port = terminal_auth.get('port', 22)
    if not ip or not user:
        raise cfy_exc.NonRecoverableError(
            "please check your credentials, ip or user not set")
    # additional settings (per-call overrides possible below)
    global_promt_check = terminal_auth.get('promt_check')
    global_error_examples = terminal_auth.get('errors')
    exit_command = terminal_auth.get('exit_command', 'exit')
    # save logs to debug file
    log_file_name = None
    if terminal_auth.get('store_logs'):
        log_file_name = "/tmp/terminal-%s_%s_%s.log" % (str(
            ctx.execution_id), str(ctx.instance.id), str(ctx.workflow_id))
        ctx.logger.info("Communication logs will be saved to %s" % log_file_name)
    connection = terminal_connection.connection()
    prompt = connection.connect(ip, user, password, key_content, port,
                                global_promt_check, logger=ctx.logger,
                                log_file_name=log_file_name)
    ctx.logger.info("Device prompt: " + prompt)
    for call in calls:
        responses = call.get('responses', [])
        promt_check = call.get('promt_check', global_promt_check)
        error_examples = call.get('errors', global_error_examples)
        # use action if exist
        operation = call.get('action', "")
        # use template if have
        if not operation and 'template' in call:
            template_name = call.get('template')
            template_params = call.get('params')
            template = ctx.get_resource(template_name)
            if not template:
                ctx.logger.info("Empty template.")
                continue
            template_engine = Template(template)
            if not template_params:
                template_params = {}
            # save context for reuse in template
            template_params['ctx'] = ctx
            operation = template_engine.render(template_params)
        # incase of template_text
        if not operation and 'template_text' in call:
            template_params = call.get('params')
            template = call.get('template_text')
            if not template:
                ctx.logger.info("Empty template_text.")
                continue
            template_engine = Template(template)
            if not template_params:
                template_params = {}
            # save context for reuse in template
            template_params['ctx'] = ctx
            operation = template_engine.render(template_params)
        if not operation:
            continue
        if responses:
            ctx.logger.info("We have predefined responses: " + str(responses))
        ctx.logger.info("Template: \n" + str(operation))
        # Run the rendered operation line-by-line, collecting output.
        result = ""
        for op_line in operation.split("\n"):
            # skip empty lines
            if not op_line.strip():
                continue
            ctx.logger.info("Execute: " + op_line)
            result_part = connection.run(op_line, promt_check, error_examples,
                                         responses)
            if result_part.strip():
                ctx.logger.info(result_part.strip())
            result += (result_part + "\n")
        # save results to runtime properties
        save_to = call.get('save_to')
        if save_to:
            ctx.logger.info("For save: " + result.strip())
            ctx.instance.runtime_properties[save_to] = result.strip()
    # Keep issuing the exit command until the remote side closes the session.
    while not connection.is_closed() and exit_command:
        ctx.logger.info("Execute close")
        result = connection.run(exit_command, promt_check, error_examples)
        ctx.logger.info("Result of close: " + result)
    connection.close()
def configure(subject=None):
    """Write the clearwater local_config and nameserver config files
    (variant that installs pysnmp, sets the timezone, and derives the host
    IP from ifconfig output instead of relationships).

    :param subject: context-like object for node properties; defaults to
        the operation ``ctx``.
    """
    # NOTE(review): pip.main is a deprecated/unsupported API in modern pip;
    # kept as-is since this module clearly targets Python 2.
    import pip
    pip.main(['install', 'pysnmp==4.2.5'])
    subject = subject or ctx
    ctx.logger.info('Configuring clearwater node.')
    template = Template(ctx.get_resource(TEMPLATE_RESOURCE_NAME))
    timezone = ctx.node.properties.get('timezone')
    if timezone:
        ctx.logger.info('Set time zone: %s.' % timezone)
        _run('sudo timedatectl set-timezone %s' % timezone,
             error_message='Cannot set time zone')
    ctx.logger.debug('Building a dict object that will contain variables '
                     'to write to the Jinja2 template.')
    # Get the host public IP
    name = ctx.instance.id
    relationships = ctx.instance.relationships
    # Scrape the first interface's address from ifconfig output
    # (Python 2 'commands' module); fragile -- assumes the classic
    # ifconfig layout where line 2, token 2 is 'addr:<ip>'.
    host_ip = commands.getoutput("/sbin/ifconfig").split(
        "\n")[1].split()[1][5:]
    public_ip = inputs['public_ip']
    # Get bind host IP: any related instance whose id starts with 'bind_'.
    binds = []
    for element in relationships:
        text = element.target.instance.id
        if re.split(r'_', text)[0] == 'bind':
            binds.append(element.target.instance.host_ip)
    config = subject.node.properties.copy()
    # NOTE(review): binds[0] raises IndexError when no bind node is related
    # -- assumes the blueprint always provides one; confirm.
    config.update(
        dict(name=name.replace('_', '-'),
             host_ip=host_ip,
             etcd_ip=binds[0],
             public_ip=public_ip))
    ctx.logger.debug(
        'Rendering the Jinja2 template to {0}.'.format(CONFIG_PATH))
    ctx.logger.debug('The config dict: {0}.'.format(config))
    # Generate local_config file from jinja template
    with tempfile.NamedTemporaryFile(delete=False) as temp_config:
        temp_config.write(template.render(config))
    _run('sudo mkdir -p /etc/clearwater',
         error_message='Failed to create clearwater config directory.')
    _run('sudo mv {0} {1}'.format(temp_config.name, CONFIG_PATH),
         error_message='Failed to write to {0}.'.format(CONFIG_PATH))
    _run('sudo chmod 644 {0}'.format(CONFIG_PATH),
         error_message='Failed to change permissions {0}.'.format(CONFIG_PATH))
    template = Template(ctx.get_resource(TEMPLATE_RESOURCE_NAME_NAMESERVER))
    config = subject.node.properties.copy()
    config.update(dict(binds=binds))
    # Generate dnsmasq file from jinja template
    with tempfile.NamedTemporaryFile(delete=False) as temp_config:
        temp_config.write(template.render(config))
    _run(
        'sudo mv {0} {1}'.format(temp_config.name, CONFIG_PATH_NAMESERVER),
        error_message='Failed to write to {0}.'.format(CONFIG_PATH_NAMESERVER))
    _run('sudo chmod 644 {0}'.format(CONFIG_PATH_NAMESERVER),
         error_message='Failed to change permissions {0}.'.format(
             CONFIG_PATH_NAMESERVER))
from cloudify import ctx
from cloudify.state import ctx_parameters as inputs

# Fetch the same blueprint resource two ways -- in memory via get_resource
# and via download_resource -- and stash both copies in runtime properties.
path = inputs['resource_path']

ctx.logger.info('Getting resource: {0}'.format(path))
in_memory_copy = ctx.get_resource(path)
ctx.logger.info('Resource = "{0}"'.format(in_memory_copy))
ctx.instance.runtime_properties['get_resource'] = in_memory_copy

ctx.logger.info('Downloading resource: {0}'.format(path))
downloaded_file = ctx.download_resource(path)
with open(downloaded_file, 'r') as handle:
    downloaded_copy = handle.read()
ctx.logger.info('Resource = "{0}"'.format(downloaded_copy))
ctx.instance.runtime_properties['download_resource'] = downloaded_copy
def create(instance_type, image_id, name, external_ip, startup_script, scopes,
           tags, zone=None, can_ip_forward=False, additional_settings=None,
           **kwargs):
    """Create a GCP compute instance for this node instance.

    :param instance_type: GCP machine type (e.g. 'n1-standard-1').
    :param image_id: image to boot from.
    :param name: base name; finalized via utils.get_final_resource_name.
    :param external_ip: whether to attach an external IP.
    :param startup_script: dict with 'type' ('file' -> blueprint resource
        path, 'string' -> literal script) and 'script'.
    :param scopes: service-account scopes for the instance.
    :param tags: network tags.
    :param zone: explicit zone; when omitted the zone is taken from runtime
        properties, derived from the subnetwork's region, or from gcp_config.
    :raises NonRecoverableError: on an unrecognized startup_script type.
    """
    props = ctx.instance.runtime_properties
    gcp_config = utils.get_gcp_config()
    script = ''
    if startup_script:
        # 'file' scripts are blueprint resources; 'string' scripts are inline.
        if startup_script.get('type') == 'file':
            script = ctx.get_resource(startup_script.get('script'))
        elif startup_script.get('type') == 'string':
            script = startup_script.get('script')
        else:
            raise NonRecoverableError(
                'invalid script type: {}'.format(startup_script.get('type')))
    ctx.logger.info('The script is {0}'.format(str(startup_script)))
    ssh_keys = get_ssh_keys()
    network, subnetwork = utils.get_net_and_subnet(ctx)
    # Resolve the zone and persist the choice in runtime properties so
    # retries/other operations see the same zone.
    if zone:
        zone = props['zone'] = utils.get_gcp_resource_name(zone)
    else:
        if props.get('zone', False):
            # A previous attempt already picked a zone -- reuse it.
            zone = props['zone']
        elif subnetwork:
            # Pick a random zone within the subnetwork's region.
            zone = props['zone'] = random.choice(constants.REGION_ZONES_FULL[
                basename(utils.get_network_node(ctx)
                         .instance.runtime_properties['region'])])
        else:
            # Fall back to the zone configured in gcp_config.
            zone = props['zone'] = utils.get_gcp_resource_name(
                gcp_config['zone'])
    instance_name = utils.get_final_resource_name(name)
    instance = Instance(
        gcp_config,
        ctx.logger,
        name=instance_name,
        image=image_id,
        machine_type=instance_type,
        external_ip=external_ip,
        startup_script=script,
        scopes=scopes,
        tags=tags,
        ssh_keys=ssh_keys,
        network=network,
        subnetwork=subnetwork,
        zone=zone,
        can_ip_forward=can_ip_forward,
        additional_settings=additional_settings,
    )
    utils.create(instance)