def _mergetreejinja(src, dst, context):
    '''
    Merge directory A to directory B, applying Jinja2 templating to both the
    file/folder names AND to the contents of the files.

    :param src: The source path
    :type  src: ``str``

    :param dst: The destination path
    :type  dst: ``str``

    :param context: The dictionary to inject into the Jinja template as context
    :type  context: ``dict``
    '''
    for item in os.listdir(src):
        s = os.path.join(src, item)
        d = os.path.join(dst, item)
        if os.path.isdir(s):
            log.info("Copying folder {0} to {1}".format(s, d))
            # Create the destination folder only when it is missing, then
            # recurse once (the original duplicated the recursive call in
            # both branches of the exists/mkdir split).
            if not os.path.exists(d):
                os.mkdir(d)
            _mergetreejinja(s, d, context)
        else:
            # The template descriptor file is metadata, not content to copy.
            if item != TEMPLATE_FILE_NAME:
                # Render Jinja in the destination *file name* as well as in
                # the file body.
                d = Template(d).render(context)
                log.info("Copying file {0} to {1}".format(s, d))
                with fopen(s, 'r') as source_file:
                    src_contents = source_file.read()
                dest_contents = Template(src_contents).render(context)
                with fopen(d, 'w') as dest_file:
                    dest_file.write(dest_contents)
def include_pillar_file(pillar_file_path, pillar_data):
    '''
    Best-effort merge of a YAML pillar file into ``pillar_data``.

    A missing file is skipped silently; unreadable YAML is logged and
    otherwise ignored so one bad file cannot break pillar rendering.
    Returns the (possibly updated) ``pillar_data`` dict.
    '''
    if not os.path.isfile(pillar_file_path):
        return pillar_data
    try:
        with fopen(pillar_file_path, 'r') as cluster_config:
            pillar_data.update(yaml.safe_load(cluster_config))
    except Exception:
        log.critical(
            'Failed to load yaml from {0}.'.format(pillar_file_path))
    return pillar_data
def sumfile(fpath):
    '''
    Return the hex MD5 digest of the file at ``fpath``.

    Reads in 8096-byte chunks.
    '''
    # Since we will be do'in this for small files, it should be ok
    m = md5()
    # The original leaked the file handle; a context manager guarantees the
    # descriptor is released even if read() raises.
    # NOTE(review): fopen is opened without an explicit binary mode here;
    # md5.update requires bytes on Python 3 — confirm fopen's default mode.
    with fopen(fpath) as fobj:
        while True:
            d = fobj.read(8096)
            if not d:
                break
            m.update(d)
    return m.hexdigest()
def test_run(self):
    # Run the extend generator; keep the output path on the instance so
    # tearDown can clean it up.
    out = salt.utils.extend.run('test', 'test', 'this description', '.', False)
    self.out = out
    year = date.today().strftime('%Y')

    # The generated tree must exist, with template.yml stripped out of it.
    self.assertTrue(os.path.exists(out))
    self.assertFalse(os.path.exists(os.path.join(out, 'template.yml')))

    generated_dir = os.path.join(out, 'directory')
    self.assertTrue(os.path.exists(generated_dir))
    generated_file = os.path.join(generated_dir, 'test.py')
    self.assertTrue(os.path.exists(generated_file))

    # The templated file body should have rendered to the current year.
    with fopen(generated_file, 'r') as test_f:
        self.assertEqual(test_f.read(), year)
def conf():
    '''
    Parse GRUB conf file

    CLI Example::

        salt '*' grub.conf
    '''
    # Accumulators for the stanza state machine:
    #   stanza   - text of the stanza currently being collected
    #   stanzas  - completed stanza texts
    #   instanza - 1 while inside a 'title ...' stanza, 0 otherwise
    #   ret      - top-level (non-stanza) key/value settings
    #   pos      - ordinal of each stanza, appended as an 'order N' line
    stanza = ''
    stanzas = []
    instanza = 0
    ret = {}
    pos = 0
    try:
        with fopen(_detect_conf(), 'r') as _fp:
            for line in _fp:
                # Skip comment lines entirely.
                if line.startswith('#'):
                    continue
                # A blank line terminates the current stanza (if any).
                if line.startswith('\n'):
                    instanza = 0
                    if 'title' in stanza:
                        stanza += 'order {0}'.format(pos)
                        pos += 1
                        stanzas.append(stanza)
                    stanza = ''
                    continue
                # A 'title' line opens a new stanza.
                if line.startswith('title'):
                    instanza = 1
                if instanza == 1:
                    stanza += line
                # Lines outside any stanza are top-level settings.
                if instanza == 0:
                    key, value = _parse_line(line)
                    ret[key] = value
            # Flush the final stanza when the file ends without a trailing
            # blank line.
            if instanza == 1:
                if not line.endswith('\n'):
                    line += '\n'
                stanza += line
                stanza += 'order {0}'.format(pos)
                pos += 1
                stanzas.append(stanza)
    except (IOError, OSError) as exc:
        msg = "Could not read grub config: {0}"
        raise CommandExecutionError(msg.format(str(exc)))

    # Second pass: parse each collected stanza text into a dict.
    ret['stanzas'] = []
    for stanza in stanzas:
        mydict = {}
        for line in stanza.strip().splitlines():
            key, value = _parse_line(line)
            mydict[key] = value
        ret['stanzas'].append(mydict)
    return ret
def __copy_sysctl(self):
    '''
    Copies an existing sysconf file and returns temp file path. Copied
    file will be restored in tearDown
    '''
    # Create new temporary file path
    temp_path = mkstemp()
    # Context managers ensure both handles are closed even if the copy loop
    # raises (the original only closed them on the success path).
    with fopen(CONFIG, 'r') as org_conf:
        with open(temp_path, 'w') as temp_sysconf:
            # write sysctl lines to temp file
            for line in org_conf:
                temp_sysconf.write(line)
    return temp_path
def _persist_next_ac_chunk(next_chunk):
    '''
    Persist next_chunk to execute as YAML in '_mgractionchains.conf'
    '''
    _add_boot_time(next_chunk, "persist")
    storage_path = _get_ac_storage_filenamepath()
    try:
        # Make sure the parent directory exists before writing.
        storage_dir = os.path.dirname(storage_path)
        if not os.path.exists(storage_dir):
            os.makedirs(storage_dir)
        with fopen(storage_path, "w") as storage_fh:
            storage_fh.write(yaml.dump(next_chunk))
    except (IOError, yaml.scanner.ScannerError) as exc:
        err_str = "Error writing YAML from '{0}': {1}".format(
            storage_path, exc)
        log.error(err_str)
        raise CommandExecutionError(err_str)
def ext_pillar(minion_id,
               pillar,
               config_file='/srv/salt/ext/pillar/minions.conf'):
               # config_file='/etc/salt/minions.conf'):
    '''
    Load the yaml config and return minion specific configurations.
    '''
    try:
        with fopen(config_file, 'r') as config:
            all_dict = yaml.safe_load(config)
    except Exception:
        # Without the config file there is nothing to merge; return empty.
        log.critical('Failed to load yaml from {0}.'.format(config_file))
        return {}
    # Per-minion section of the config; empty dict when the minion is unknown.
    pillar_dict = all_dict.get(minion_id, {})
    # NOTE(review): raises KeyError when the minion's entry has no 'env' key —
    # confirm every entry in the config defines one.
    env_name = pillar_dict['env']
    try:
        # Map environment aliases onto a standard environment name.
        std_env_name = env_maps[env_name.upper()]
    except Exception:
        # Unknown alias: fall back to the raw environment name.
        std_env_name = env_name
        log.critical('Failed to get std env name, so assigning env. name')
    pillar_dict['stdenv'] = std_env_name
    cluster_id = pillar_dict.get('cluster', DEFAULT_CLUSTER_ID).lower()
    machine_id = minion_id.lower()
    saltenv = __opts__.get('environment', 'base')
    if not saltenv:
        saltenv = 'base'
    pillar_roots = __opts__['pillar_roots'][saltenv]
    # Look for node specific and cluster specific pillar files in all pillar
    # roots; later files override earlier keys via dict.update.
    for pillar_root in pillar_roots:
        pillar_dict = include_pillar_file(
            '{0}/environments/{1}.sls'.format(pillar_root, env_name),
            pillar_dict)
        pillar_dict = include_pillar_file(
            '{0}/clusters/{1}.sls'.format(pillar_root, cluster_id),
            pillar_dict)
        pillar_dict = include_pillar_file(
            '{0}/machines/{1}.sls'.format(pillar_root, machine_id),
            pillar_dict)
    return pillar_dict
def _write_proxy_conf(proxyfile):
    '''
    write to file
    '''
    log.trace('Salt Proxy Module: write proxy conf')
    # Guard clause: nothing to write when no file path was supplied.
    if not proxyfile:
        msg = 'Invalid value for proxy file provided!, Supplied value = {0}' \
            .format(proxyfile)
        log.debug(msg)
        return msg
    log.debug('Writing proxy conf file')
    # Point the proxy minion at the master recorded in the grains.
    with fopen(proxyfile, 'w') as proxy_conf:
        proxy_conf.write('master = {0}'.format(__grains__['master']))
    msg = 'Wrote proxy file {0}'.format(proxyfile)
    log.debug(msg)
    return msg
def _get_template(path, option_key):
    '''
    Get the contents of a template file and provide it as a module type

    :param path: path to the template.yml file
    :type  path: ``str``

    :param option_key: The unique key of this template
    :type  option_key: ``str``

    :returns: Details about the template
    :rtype: ``tuple``
    '''
    with fopen(path, "r") as template_f:
        template = deserialize(template_f)
    # (key, human-readable description, full template mapping)
    return (option_key, template.get('description', ''), template)
def _write_proxy_conf(proxyfile):
    '''
    write to file

    :param proxyfile: path of the proxy configuration file to write;
        a falsy value makes this a no-op.
    :returns: a human-readable status message (also logged at debug level).
    '''
    # Assume failure until the conf file has actually been written.
    msg = 'Invalid value for proxy file provided!, Supplied value = {0}' \
        .format(proxyfile)
    log.trace('Salt Proxy Module: write proxy conf')
    if proxyfile:
        log.debug('Writing proxy conf file')
        # Point the proxy minion at the master recorded in the grains.
        with fopen(proxyfile, 'w') as proxy_conf:
            proxy_conf.write('master = {0}'
                             .format(__grains__['master']))
        msg = 'Wrote proxy file {0}'.format(proxyfile)
    log.debug(msg)
    return msg
def __get_system_group_gid_range(self):
    '''
    Returns (SYS_GID_MIN, SYS_GID_MAX) as integers.
    '''
    defs_file = '/etc/login.defs'
    if os.path.exists(defs_file):
        with utils.fopen(defs_file) as defs_fd:
            # Build a {key: value} mapping from non-comment, non-blank lines.
            # NOTE(review): assumes every such line has exactly two
            # whitespace-separated fields — confirm against login.defs(5).
            login_defs = dict([
                x.split()
                for x in defs_fd.readlines()
                if x.strip() and not x.strip().startswith('#')
            ])
    else:
        login_defs = {'SYS_GID_MIN': 101,
                      'SYS_GID_MAX': 999}

    # Values parsed from the file are strings, so the original returned a
    # mixed str/int pair depending on which branch ran.  Cast both bounds
    # so callers always receive integers.
    gid_min = int(login_defs.get('SYS_GID_MIN', 101))
    gid_max = int(login_defs.get('SYS_GID_MAX',
                                 int(login_defs.get('GID_MIN', 1000)) - 1))
    return gid_min, gid_max
def _read_next_ac_chunk(clear=True):
    '''
    Read and remove the content of '_mgractionchains.conf' file. Return the
    parsed YAML.

    :param clear: remove the state file after a successful read (default True)
    :returns: the parsed YAML payload, or None when no state file exists
    :raises CommandExecutionError: if the file cannot be read or parsed
    '''
    f_storage_filename = _get_ac_storage_filenamepath()
    if not os.path.isfile(f_storage_filename):
        return None
    ret = None
    try:
        with fopen(f_storage_filename, "r") as f_storage:
            # safe_load instead of the deprecated/unsafe bare yaml.load: the
            # file is written by _persist_next_ac_chunk via yaml.dump, so it
            # only ever contains plain YAML types.
            ret = yaml.safe_load(f_storage.read())
        if clear:
            os.remove(f_storage_filename)
        return ret
    except (IOError, yaml.scanner.ScannerError) as exc:
        err_str = "Error processing YAML from '{0}': {1}".format(
            f_storage_filename, exc)
        log.error(err_str)
        raise CommandExecutionError(err_str)
def current_pty_count():
    # Get current number of PTY's
    # NOTE: uses ``self`` from the enclosing scope — this reads as a nested
    # helper inside a test method, not a standalone function; confirm.
    try:
        # Linux exposes the allocated-PTY count directly via procfs.
        if os.path.exists('/proc/sys/kernel/pty/nr'):
            with fopen('/proc/sys/kernel/pty/nr') as fh_:
                return int(fh_.read().strip())

        # Fallback (non-Linux, e.g. BSD/macOS): ask sysctl for pty.nr.
        proc = subprocess.Popen(
            'sysctl -a 2> /dev/null | grep pty.nr | awk \'{print $3}\'',
            shell=True,
            stdout=subprocess.PIPE)
        stdout, _ = proc.communicate()
        return int(stdout.strip())
    except (ValueError, OSError, IOError):
        if is_darwin():
            # We're unable to findout how many PTY's are open
            self.skipTest(
                'Unable to find out how many PTY\'s are open on Darwin - '
                'Skipping for now')
        self.fail('Unable to find out how many PTY\'s are open')
def current_pty_count():
    '''
    Return the number of PTYs currently allocated on this host.

    Nested helper: ``self`` is taken from the enclosing test method so the
    test can be skipped/failed when the count cannot be determined.
    '''
    try:
        pty_nr_path = '/proc/sys/kernel/pty/nr'
        if os.path.exists(pty_nr_path):
            # Linux: procfs reports the count directly.
            with fopen(pty_nr_path) as fh_:
                return int(fh_.read().strip())
        # Non-Linux fallback: query sysctl for pty.nr.
        proc = subprocess.Popen(
            'sysctl -a 2> /dev/null | grep pty.nr | awk \'{print $3}\'',
            shell=True,
            stdout=subprocess.PIPE
        )
        out = proc.communicate()[0]
        return int(out.strip())
    except (ValueError, OSError, IOError):
        if is_darwin():
            # We're unable to findout how many PTY's are open
            self.skipTest(
                'Unable to find out how many PTY\'s are open on Darwin - '
                'Skipping for now'
            )
        self.fail('Unable to find out how many PTY\'s are open')
# Minimal minion-side configuration pointing at a local master.
master_ip = '127.0.0.1'
master_port = '4506'
minion_config = {
    'transport': 'zeromq',
    'pki_dir': '/tmp',
    'id': 'root',
    'log_level': 'debug',
    'master_ip': master_ip,
    'master_port': master_port,
    'auth_timeout': 5,
    'auth_tries': 1,
    'master_uri': 'tcp://{0}:{1}'.format(master_ip, master_port)
}

# Read the master's root key from the local cache (requires filesystem
# access to the master's cache directory).
with fopen('/var/cache/salt/master/.root_key') as keyfd:
    root_key = keyfd.read()

# Drop a marker file; retrieving it later demonstrates the issue this
# script probes for.
top_secret_file_path = '/tmp/salt_cve_teta'
with fopen(top_secret_file_path, 'w') as fd:
    fd.write("top secret")

# 'clear' crypt: an unauthenticated/unencrypted request channel to the master.
clear_channel = salt.transport.client.ReqChannel.factory(
    minion_config, crypt='clear')

# --- check funcs ----


def check_salt_version():
    # Print the running Salt version for diagnostics.
    print("[+] Salt version: {0}".format(salt.version.__version__))
def file(name,
         source_hash='',
         user='******',
         template=None,
         context=None,
         replace=True,
         defaults=None,
         env=None,
         backup='',
         **kwargs):
    '''
    Provides file.managed-like functionality (templating, etc.) for a pre-made
    crontab file, to be assigned to a given user.

    name
        The source file to be used as the crontab. This source file can be
        hosted on either the salt master server, or on an HTTP or FTP server.
        For files hosted on the salt file server, if the file is located on
        the master in the directory named spam, and is called eggs, the source
        string is salt://spam/eggs

        If the file is hosted on a HTTP or FTP server then the source_hash
        argument is also required

    source_hash
        This can be either a file which contains a source hash string for the
        source, or a source hash string. The source hash string is the hash
        algorithm followed by the hash of the file:
        md5=e138491e9d5b97023cea823fe17bac22

    user
        The user to whom the crontab should be assigned. This defaults to
        root.

    template
        If this setting is applied then the named templating engine will be
        used to render the downloaded file. Currently, jinja and mako are
        supported.

    context
        Overrides default context variables passed to the template.

    replace
        If the crontab should be replaced, if False then this command will be
        ignored if a crontab exists for the specified user. Default is True.

    defaults
        Default context passed to the template.

    backup
        Overrides the default backup mode for the user's crontab.
    '''
    # Initial set up
    mode = __salt__['config.manage_mode'](600)
    owner, group, crontab_dir = _get_cron_info()
    # Snapshot the user's current crontab into a temp file; every exit path
    # below must unlink it.
    cron_path = mkstemp()
    with fopen(cron_path, 'w+') as fp_:
        fp_.write(__salt__['cron.raw_cron'](user))
    ret = {'changes': {},
           'comment': '',
           'name': name,
           'result': True}
    # Avoid variable naming confusion in below module calls, since ID
    # delclaration for this state will be a source URI.
    source = name

    if env is None:
        env = kwargs.get('__env__', 'base')

    # replace=False and an existing (non-empty) crontab: nothing to do.
    if not replace and os.stat(cron_path).st_size > 0:
        ret['comment'] = 'User {0} already has a crontab. No changes ' \
                         'made'.format(user)
        os.unlink(cron_path)
        return ret

    # Dry run: report what file.check_managed would do, then bail.
    if __opts__['test']:
        fcm = __salt__['file.check_managed'](cron_path,
                                             source,
                                             source_hash,
                                             owner,
                                             group,
                                             mode,
                                             template,
                                             False,  # makedirs = False
                                             context,
                                             defaults,
                                             env,
                                             **kwargs)
        ret['result'], ret['comment'] = fcm
        os.unlink(cron_path)
        return ret

    # If the source is a list then find which file exists
    source, source_hash = __salt__['file.source_list'](source,
                                                       source_hash,
                                                       env)

    # Gather the source file from the server
    sfn, source_sum, comment = __salt__['file.get_managed'](cron_path,
                                                            template,
                                                            source,
                                                            source_hash,
                                                            owner,
                                                            group,
                                                            mode,
                                                            env,
                                                            context,
                                                            defaults,
                                                            **kwargs)
    # A non-empty comment from get_managed signals a fetch/template failure.
    if comment:
        ret['comment'] = comment
        ret['result'] = False
        os.unlink(cron_path)
        return ret

    # Apply the managed content onto the temp crontab copy.
    ret = __salt__['file.manage_file'](cron_path,
                                       sfn,
                                       ret,
                                       source,
                                       source_sum,
                                       owner,
                                       group,
                                       mode,
                                       env,
                                       backup)
    if ret['changes']:
        # Collapse changes to just the diff for a readable state report.
        ret['changes'] = {'diff': ret['changes']['diff']}
        ret['comment'] = 'Crontab for user {0} was updated'.format(user)
    elif ret['result']:
        ret['comment'] = 'Crontab for user {0} is in the correct ' \
                         'state'.format(user)
    # Install the (possibly updated) temp file as the user's crontab.
    cron_ret = __salt__['cron.write_cron_file_verbose'](user, cron_path)
    if cron_ret['retcode']:
        ret['comment'] = 'Unable to update user {0} crontab {1}.' \
                         ' Error: {2}'.format(user, cron_path,
                                              cron_ret['stderr'])
        ret['result'] = False
    os.unlink(cron_path)
    return ret
def managed(name, data, models, **kwargs):
    '''
    Manage the device configuration given the input data strucuted
    according to the YANG models.

    data
        YANG structured data.

    models
        A list of models to be used when generating the config.

    profiles: ``None``
        Use certain profiles to generate the config.
        If not specified, will use the platform default profile(s).

    test: ``False``
        Dry run? If set as ``True``, will apply the config, discard
        and return the changes. Default: ``False`` and will commit
        the changes on the device.

    commit: ``True``
        Commit? Default: ``True``.

    debug: ``False``
        Debug mode. Will insert a new key under the output dictionary,
        as ``loaded_config`` contaning the raw configuration loaded on the
        device.

    replace: ``False``
        Should replace the config with the new generate one?

    State SLS example:

    .. code-block:: jinja

        {%- set expected_config = pillar.get('openconfig_interfaces_cfg') -%}
        interfaces_config:
          napalm_yang.managed:
            - data: {{ expected_config | json }}
            - models:
              - models.openconfig_interfaces
            - debug: true

    Pillar example:

    .. code-block:: yaml

        openconfig_interfaces_cfg:
          _kwargs:
            filter: true
          interfaces:
            interface:
              Et1:
                config:
                  mtu: 9000
              Et2:
                config:
                  description: "description example"
    '''
    ret = salt.utils.napalm.default_ret(name)
    test = kwargs.get('test', False) or __opts__.get('test', False)
    debug = kwargs.get('debug', False) or __opts__.get('debug', False)
    # BUG FIX: the original used
    #   kwargs.get('commit', True) or __opts__.get('commit', True)
    # which evaluates truthy even when the caller passed commit=False,
    # silently committing.  An explicit kwarg must win; fall back to opts.
    commit = kwargs.get('commit', __opts__.get('commit', True))
    replace = kwargs.get('replace', False) or __opts__.get('replace', False)
    profiles = kwargs.get('profiles', [])
    temp_file = __salt__['temp.file']()
    log.debug('Creating temp file: {0}'.format(temp_file))
    # Normalise the data into the [{'to_dict': ...}] shape the compliance
    # report expects.
    if 'to_dict' not in data:
        data = {'to_dict': data}
    data = [data]
    # Round-trip through JSON to strip any non-serialisable objects before
    # dumping as YAML for napalm_yang.
    with fopen(temp_file, 'w') as file_handle:
        yaml.safe_dump(
            json.loads(json.dumps(data)),
            file_handle,
            encoding='utf-8',
            allow_unicode=True)
    device_config = __salt__['napalm_yang.parse'](models,
                                                  config=True,
                                                  profiles=profiles)
    log.debug('Parsed the config from the device:')
    log.debug(device_config)
    compliance_report = __salt__['napalm_yang.compliance_report'](
        device_config, models, filepath=temp_file)
    log.debug('Compliance report:')
    log.debug(compliance_report)
    complies = compliance_report.get('complies', False)
    if complies:
        ret.update({
            'result': True,
            'comment': 'Already configured as required.'
        })
        log.debug('All good here.')
        return ret
    log.debug('Does not comply, trying to generate and load config')
    # Unwrap the payload and drop the pillar-only '_kwargs' marker.
    data = data[0]['to_dict']
    if '_kwargs' in data:
        data.pop('_kwargs')
    loaded_changes = __salt__['napalm_yang.load_config'](data,
                                                         models,
                                                         profiles=profiles,
                                                         test=test,
                                                         debug=debug,
                                                         commit=commit,
                                                         replace=replace)
    log.debug('Loaded config result:')
    log.debug(loaded_changes)
    __salt__['file.remove'](temp_file)
    return salt.utils.napalm.loaded_ret(ret, loaded_changes, test, debug)
def file(name,
         source_hash='',
         user='******',
         template=None,
         context=None,
         replace=True,
         defaults=None,
         env=None,
         backup='',
         **kwargs):
    '''
    Provides file.managed-like functionality (templating, etc.) for a pre-made
    crontab file, to be assigned to a given user.

    name
        The source file to be used as the crontab. This source file can be
        hosted on either the salt master server, or on an HTTP or FTP server.
        For files hosted on the salt file server, if the file is located on
        the master in the directory named spam, and is called eggs, the source
        string is salt://spam/eggs

        If the file is hosted on a HTTP or FTP server then the source_hash
        argument is also required

    source_hash
        This can be either a file which contains a source hash string for the
        source, or a source hash string. The source hash string is the hash
        algorithm followed by the hash of the file:
        md5=e138491e9d5b97023cea823fe17bac22

    user
        The user to whom the crontab should be assigned. This defaults to
        root.

    template
        If this setting is applied then the named templating engine will be
        used to render the downloaded file. Currently, jinja and mako are
        supported.

    context
        Overrides default context variables passed to the template.

    replace
        If the crontab should be replaced, if False then this command will be
        ignored if a crontab exists for the specified user. Default is True.

    defaults
        Default context passed to the template.

    backup
        Overrides the default backup mode for the user's crontab.
    '''
    # Initial set up
    mode = __salt__['config.manage_mode'](600)
    owner, group, crontab_dir = _get_cron_info()
    # Snapshot the user's current crontab into a temp file; every exit path
    # below is responsible for unlinking it.
    cron_path = mkstemp()
    with fopen(cron_path, 'w+') as fp_:
        fp_.write(__salt__['cron.raw_cron'](user))
    ret = {'changes': {},
           'comment': '',
           'name': name,
           'result': True}
    # Avoid variable naming confusion in below module calls, since ID
    # delclaration for this state will be a source URI.
    source = name

    if env is None:
        env = kwargs.get('__env__', 'base')

    # replace=False and an existing (non-empty) crontab means no-op.
    if not replace and os.stat(cron_path).st_size > 0:
        ret['comment'] = 'User {0} already has a crontab. No changes ' \
                         'made'.format(user)
        os.unlink(cron_path)
        return ret

    # Dry run: only report what file.check_managed would do.
    if __opts__['test']:
        fcm = __salt__['file.check_managed'](cron_path,
                                             source,
                                             source_hash,
                                             owner,
                                             group,
                                             mode,
                                             template,
                                             False,  # makedirs = False
                                             context,
                                             defaults,
                                             env,
                                             **kwargs
                                             )
        ret['result'], ret['comment'] = fcm
        os.unlink(cron_path)
        return ret

    # If the source is a list then find which file exists
    source, source_hash = __salt__['file.source_list'](source,
                                                       source_hash,
                                                       env)

    # Gather the source file from the server
    sfn, source_sum, comment = __salt__['file.get_managed'](cron_path,
                                                            template,
                                                            source,
                                                            source_hash,
                                                            owner,
                                                            group,
                                                            mode,
                                                            env,
                                                            context,
                                                            defaults,
                                                            **kwargs
                                                            )
    # A non-empty comment from get_managed signals a fetch/template failure.
    if comment:
        ret['comment'] = comment
        ret['result'] = False
        os.unlink(cron_path)
        return ret

    # Apply the managed content onto the temp crontab copy.
    ret = __salt__['file.manage_file'](cron_path,
                                       sfn,
                                       ret,
                                       source,
                                       source_sum,
                                       owner,
                                       group,
                                       mode,
                                       env,
                                       backup)
    if ret['changes']:
        # Collapse changes to just the diff for a readable state report.
        ret['changes'] = {'diff': ret['changes']['diff']}
        ret['comment'] = 'Crontab for user {0} was updated'.format(user)
    elif ret['result']:
        ret['comment'] = 'Crontab for user {0} is in the correct ' \
                         'state'.format(user)
    # Install the (possibly updated) temp file as the user's crontab.
    cron_ret = __salt__['cron.write_cron_file_verbose'](user, cron_path)
    if cron_ret['retcode']:
        ret['comment'] = 'Unable to update user {0} crontab {1}.' \
                         ' Error: {2}'.format(user, cron_path,
                                              cron_ret['stderr'])
        ret['result'] = False
    os.unlink(cron_path)
    return ret
def install_config(path=None, **kwargs):
    '''
    Installs the given configuration file into the candidate configuration.
    Commits the changes if the commit checks or throws an error.

    Usage:

    .. code-block:: bash

        salt 'device_name' junos.install_config 'salt://production/network/routers/config.set'

        salt 'device_name' junos.install_config 'salt://templates/replace_config.conf' replace=True comment='Committed via SaltStack'

        salt 'device_name' junos.install_config 'salt://my_new_configuration.conf' dev_timeout=300 diffs_file='/salt/confs/old_config.conf' overwrite=True

        salt 'device_name' junos.install_config 'salt://syslog_template.conf' template_vars='{"syslog_host": "10.180.222.7"}'

    Parameters:
      Required
        * path:
          Path where the configuration/template file is present. If the file has a \
          '*.conf' extension, the content is treated as text format. If the file has a '*.xml' \
          extension, the content is treated as XML format. If the file has a '*.set' \
          extension, the content is treated as Junos OS 'set' commands.(default = None)
      Optional
        * kwargs: Keyworded arguments which can be provided like-
            * dev_timeout:
              Set NETCONF RPC timeout. Can be used for commands
              which take a while to execute. (default = 30 seconds)
            * overwrite:
              Set to True if you want this file is to completely replace the\
              configuration file. (default = False)
            * replace:
              Specify whether the configuration file uses "replace:" statements.
              Those statements under the 'replace' tag will only be changed.\
              (default = False)
            * comment:
              Provide a comment to the commit. (default = None)
            * confirm:
              Provide time in minutes for commit confirmation.
              If this option is specified, the commit will be rollbacked in \
              the given time unless the commit is confirmed.
            * diffs_file:
              Path to the file where the diff (difference in old configuration
              and the committed configuration) will be stored.(default = None)
              Note that the file will be stored on the proxy minion. To push the
              files to the master use the salt's following execution module: \
              :py:func:`cp.push <salt.modules.cp.push>`
            * template_vars:
              Variables to be passed into the template processing engine in
              addition to those present in __pillar__, __opts__, __grains__,
              etc.  You may reference these variables in your template like so:
              {{ template_vars["var_name"] }}
    '''
    conn = __proxy__['junos.conn']()
    ret = dict()
    ret['out'] = True

    if path is None:
        ret['message'] = \
            'Please provide the salt path where the configuration is present'
        ret['out'] = False
        return ret

    # CLI calls deliver their options through __pub_arg (last element when it
    # is a dict); direct module calls deliver them as plain kwargs.
    op = dict()
    if '__pub_arg' in kwargs:
        if kwargs['__pub_arg']:
            if isinstance(kwargs['__pub_arg'][-1], dict):
                op.update(kwargs['__pub_arg'][-1])
    else:
        op.update(kwargs)

    template_vars = dict()
    if "template_vars" in op:
        template_vars = op["template_vars"]

    # Render the (possibly templated) config into a local temp file.
    template_cached_path = files.mkstemp()
    __salt__['cp.get_template'](
        path, template_cached_path, template_vars=template_vars)

    if not os.path.isfile(template_cached_path):
        ret['message'] = 'Invalid file path.'
        ret['out'] = False
        return ret

    if os.path.getsize(template_cached_path) == 0:
        ret['message'] = 'Template failed to render'
        ret['out'] = False
        return ret

    # Remember where to write the diff, but keep 'diffs_file' out of the
    # options passed to PyEZ load().
    write_diff = ''
    if 'diffs_file' in op and op['diffs_file'] is not None:
        write_diff = op['diffs_file']
        del op['diffs_file']

    op['path'] = template_cached_path

    # Infer the load format from the file extension unless the caller
    # specified one explicitly.
    if 'format' not in op:
        if path.endswith('set'):
            template_format = 'set'
        elif path.endswith('xml'):
            template_format = 'xml'
        else:
            template_format = 'text'
        op['format'] = template_format

    # Translate the user-facing replace/overwrite flags into PyEZ's
    # merge/overwrite load options.
    if 'replace' in op and op['replace']:
        op['merge'] = False
        del op['replace']
    elif 'overwrite' in op and op['overwrite']:
        op['overwrite'] = True
    elif 'overwrite' in op and not op['overwrite']:
        op['merge'] = True
        del op['overwrite']

    try:
        conn.cu.load(**op)
    except Exception as exception:
        ret['message'] = 'Could not load configuration due to : "{0}"'.format(
            exception)
        # NOTE(review): template_format is only bound when the caller did NOT
        # supply 'format'; this line raises NameError otherwise — confirm and
        # guard.
        ret['format'] = template_format
        ret['out'] = False
        return ret
    finally:
        # The rendered temp file is no longer needed once load() has run.
        safe_rm(template_cached_path)

    config_diff = conn.cu.diff()
    if config_diff is None:
        ret['message'] = 'Configuration already applied!'
        ret['out'] = True
        return ret

    # Only pass commit-specific options through to commit().
    commit_params = {}
    if 'confirm' in op:
        commit_params['confirm'] = op['confirm']
    if 'comment' in op:
        commit_params['comment'] = op['comment']

    try:
        check = conn.cu.commit_check()
    except Exception as exception:
        ret['message'] = \
            'Commit check threw the following exception: "{0}"'\
            .format(exception)
        ret['out'] = False
        return ret

    if check:
        try:
            conn.cu.commit(**commit_params)
            ret['message'] = 'Successfully loaded and committed!'
        except Exception as exception:
            ret['message'] = \
                'Commit check successful but commit failed with "{0}"'\
                .format(exception)
            ret['out'] = False
            return ret
    else:
        # Failed commit check: undo the loaded candidate configuration.
        ret['message'] = 'Loaded configuration but commit check failed.'
        ret['out'] = False
        conn.cu.rollback()

    # Best-effort write of the diff captured before commit/rollback.
    try:
        if write_diff and config_diff is not None:
            with fopen(write_diff, 'w') as fp:
                fp.write(config_diff)
    except Exception as exception:
        ret['message'] = 'Could not write into diffs_file due to: "{0}"'.format(
            exception)
        ret['out'] = False
    return ret
def cli(command=None, format='text', **kwargs):
    '''
    Executes the CLI commands and returns the output in specified format. \
    (default is text) The ouput can also be stored in a file.

    Usage:

    .. code-block:: bash

        salt 'device_name' junos.cli 'show system commit'

        salt 'device_name' junos.cli 'show version' dev_timeout=40

        salt 'device_name' junos.cli 'show system alarms' 'xml' dest=/home/user/cli_output.txt

    Parameters:
      Required
        * command:
          The command that need to be executed on Junos CLI. (default = None)
      Optional
        * format:
          Format in which to get the CLI output. (text or xml, \
            default = 'text')
        * kwargs: Keyworded arguments which can be provided like-
            * dev_timeout:
              Set NETCONF RPC timeout. Can be used for commands
              which take a while to execute. (default = 30 seconds)
            * dest:
              The destination file where the CLI output can be stored.\
               (default = None)
    '''
    conn = __proxy__['junos.conn']()

    # An explicit empty string (e.g. ``junos.cli 'show system alarms' ''``)
    # would make PyEZ reply in XML; normalise it back to the text default.
    format = format or 'text'

    ret = {}
    if command is None:
        ret['message'] = 'Please provide the CLI command to be executed.'
        ret['out'] = False
        return ret

    # CLI calls deliver options via __pub_arg; direct calls via kwargs.
    opargs = {}
    if '__pub_arg' in kwargs:
        pub_arg = kwargs['__pub_arg']
        if pub_arg and isinstance(pub_arg[-1], dict):
            opargs.update(pub_arg[-1])
    else:
        opargs.update(kwargs)

    try:
        result = conn.cli(command, format, warning=False)
    except Exception as exception:
        ret['message'] = 'Execution failed due to "{0}"'.format(exception)
        ret['out'] = False
        return ret

    if format == 'text':
        ret['message'] = result
    else:
        # Serialise the XML reply, then parse it into a dict-like object.
        result = etree.tostring(result)
        ret['message'] = jxmlease.parse(result)

    destination = opargs.get('dest')
    if destination is not None:
        # Persist the raw CLI output (text, or serialised XML) to the file.
        with fopen(destination, 'w') as fp:
            fp.write(result)

    ret['out'] = True
    return ret
def rollback(id=0, **kwargs):
    '''
    To rollback the last committed configuration changes

    Usage:

    .. code-block:: bash

        salt 'device_name' junos.rollback 10

    Parameters:
      Optional
        * id:
          The rollback id value [0-49]. (default = 0)
        * kwargs: Keyworded arguments which can be provided like-
            * dev_timeout:
              Set NETCONF RPC timeout. Can be used for commands
              which take a while to execute. (default = 30 seconds)
            * comment:
              Provide a comment to the commit. (default = None)
            * confirm:
              Provide time in minutes for commit confirmation. If this option \
              is specified, the commit will be rollbacked in the given time \
              unless the commit is confirmed.
            * diffs_file:
              Path to the file where any diffs will be written. (default = None)
    '''
    ret = dict()
    conn = __proxy__['junos.conn']()

    # CLI calls deliver options via __pub_arg; direct calls via kwargs.
    op = dict()
    if '__pub_arg' in kwargs:
        if kwargs['__pub_arg']:
            if isinstance(kwargs['__pub_arg'][-1], dict):
                op.update(kwargs['__pub_arg'][-1])
    else:
        op.update(kwargs)

    try:
        ret['out'] = conn.cu.rollback(id)
    except Exception as exception:
        ret['message'] = 'Rollback failed due to "{0}"'.format(exception)
        ret['out'] = False
        return ret

    if ret['out']:
        ret['message'] = 'Rollback successful'
    else:
        # A failed rollback leaves nothing to diff or commit.
        ret['message'] = 'Rollback failed'
        return ret

    # Optionally record the diff between the current and rolled-back config.
    if 'diffs_file' in op and op['diffs_file'] is not None:
        diff = conn.cu.diff()
        if diff is not None:
            with fopen(op['diffs_file'], 'w') as fp:
                fp.write(diff)
        else:
            log.info(
                'No diff between current configuration and \
rollbacked configuration, so no diff file created')

    try:
        commit_ok = conn.cu.commit_check()
    except Exception as exception:
        ret['message'] = 'Could not commit check due to "{0}"'.format(
            exception)
        ret['out'] = False
        return ret

    # A rollback only takes effect once committed.
    if commit_ok:
        try:
            conn.cu.commit(**op)
            ret['out'] = True
        except Exception as exception:
            ret['out'] = False
            ret['message'] = \
                'Rollback successful but commit failed with error "{0}"'\
                .format(exception)
            return ret
    else:
        ret['message'] = 'Rollback succesfull but pre-commit check failed.'
        ret['out'] = False
    return ret
def rpc(cmd=None, dest=None, format='xml', *args, **kwargs):
    '''
    This function executes the rpc provided as arguments on the junos device.
    The returned data can be stored in a file whose destination can be
    specified with 'dest' keyword in the arguments.

    Usage:

    .. code-block:: bash

        salt 'device' junos.rpc 'get_config' 'text' filter='<configuration><system/></configuration>'

        salt 'device' junos.rpc 'get-interface-information' '/home/user/interface.log' interface_name='lo0' terse=True

    Options:
      * cmd: the rpc to be executed
      * dest: destination file where the rpc ouput is dumped
      * format: the format in which the rpc reply must be stored in file
        specified in the dest (used only when dest is specified)
      * args: other arguments as taken by rpc call of PyEZ
      * kwargs: keyworded arguments taken by rpc call of PyEZ
    '''
    conn = __proxy__['junos.conn']()
    ret = dict()
    ret['out'] = True

    # CLI calls deliver options via __pub_arg; direct calls via kwargs.
    op = dict()
    if '__pub_arg' in kwargs:
        if isinstance(kwargs['__pub_arg'][-1], dict):
            op.update(kwargs['__pub_arg'][-1])
    else:
        op.update(kwargs)

    if dest is None and format != 'xml':
        log.warning(
            'Format ignored as it is only used for output which is dumped in the file.')

    write_response = ''
    try:
        if cmd in ['get-config', 'get_config']:
            # get-config accepts an optional XML subtree filter.
            filter_reply = None
            if 'filter' in op:
                filter_reply = etree.XML(op['filter'])

            # Dashes in RPC names map to underscores on the PyEZ rpc object.
            xml_reply = getattr(conn.rpc, cmd.replace('-', '_'))(filter_reply,
                                                                 options=op)
            ret['message'] = jxmlease.parse(etree.tostring(xml_reply))
            write_response = etree.tostring(xml_reply)

            # Non-XML dest output requires a second RPC in the target format.
            if dest is not None and format != 'xml':
                op.update({'format': format})
                rpc_reply = getattr(conn.rpc,
                                    cmd.replace('-', '_'))(filter_reply,
                                                           options=op)
                if format == 'json':
                    write_response = json.dumps(rpc_reply, indent=1)
                else:
                    write_response = rpc_reply.text
        else:
            xml_reply = getattr(conn.rpc, cmd.replace('-', '_'))(**op)
            ret['message'] = jxmlease.parse(etree.tostring(xml_reply))
            write_response = etree.tostring(xml_reply)

            if dest is not None and format != 'xml':
                rpc_reply = getattr(conn.rpc,
                                    cmd.replace('-', '_'))({'format': format},
                                                           **op)
                if format == 'json':
                    write_response = json.dumps(rpc_reply, indent=1)
                else:
                    write_response = rpc_reply.text
    except Exception as exception:
        ret['message'] = 'Execution failed due to "{0}"'.format(exception)
        ret['out'] = False

    # The file is written even on failure (write_response is then '').
    if dest is not None:
        with fopen(dest, 'w') as fp:
            fp.write(write_response)
    return ret
def rpc(cmd=None, dest=None, format='xml', **kwargs):
    '''
    This function executes the rpc provided as arguments on the junos device.
    The returned data can be stored in a file.

    Usage:

    .. code-block:: bash

        salt 'device' junos.rpc 'get_config' '/var/log/config.txt' 'text' filter='<configuration><system/></configuration>'

        salt 'device' junos.rpc 'get-interface-information' '/home/user/interface.xml' interface_name='lo0' terse=True

        salt 'device' junos.rpc 'get-chassis-inventory'

    Parameters:
      Required
        * cmd:
          The rpc to be executed. (default = None)
      Optional
        * dest:
          Destination file where the rpc ouput is stored. (default = None)
          Note that the file will be stored on the proxy minion. To push the
          files to the master use the salt's following execution module:
          :py:func:`cp.push <salt.modules.cp.push>`
        * format:
          The format in which the rpc reply is received from the device.
          (default = xml)
        * kwargs: keyworded arguments taken by rpc call like-
            * dev_timeout:
              Set NETCONF RPC timeout. Can be used for commands
              which take a while to execute. (default= 30 seconds)
            * filter:
              Only to be used with 'get-config' rpc to get specific
              configuration.
            * terse:
              Amount of information you want.
            * interface_name:
              Name of the interface whose information you want.
    '''
    conn = __proxy__['junos.conn']()
    ret = dict()
    ret['out'] = True

    if cmd is None:
        ret['message'] = 'Please provide the rpc to execute.'
        ret['out'] = False
        return ret

    # CLI calls deliver options via __pub_arg; direct calls via kwargs.
    op = dict()
    if '__pub_arg' in kwargs:
        if kwargs['__pub_arg']:
            if isinstance(kwargs['__pub_arg'][-1], dict):
                op.update(kwargs['__pub_arg'][-1])
    else:
        op.update(kwargs)
    # Normalise any user-supplied 'timeout' into PyEZ's dev_timeout option,
    # defaulting to the connection's own timeout.
    op['dev_timeout'] = str(op.pop('timeout', conn.timeout))

    if cmd in ['get-config', 'get_config']:
        # get-config accepts an optional XML subtree filter.
        filter_reply = None
        if 'filter' in op:
            filter_reply = etree.XML(op['filter'])
            del op['filter']

        op.update({'format': format})
        try:
            # Dashes in RPC names map to underscores on the PyEZ rpc object.
            reply = getattr(
                conn.rpc, cmd.replace('-', '_'))(filter_reply, options=op)
        except Exception as exception:
            ret['message'] = 'RPC execution failed due to "{0}"'.format(
                exception)
            ret['out'] = False
            return ret
    else:
        op['dev_timeout'] = int(op['dev_timeout'])
        if 'filter' in op:
            log.warning(
                'Filter ignored as it is only used with "get-config" rpc')
        try:
            reply = getattr(
                conn.rpc, cmd.replace('-', '_'))({'format': format}, **op)
        except Exception as exception:
            ret['message'] = 'RPC execution failed due to "{0}"'.format(
                exception)
            ret['out'] = False
            return ret

    if format == 'text':
        # Earlier it was ret['message']
        ret['rpc_reply'] = reply.text
    elif format == 'json':
        # Earlier it was ret['message']
        ret['rpc_reply'] = reply
    else:
        # Earlier it was ret['message']
        ret['rpc_reply'] = jxmlease.parse(etree.tostring(reply))

    # Optionally persist the reply in the requested serialisation.
    if dest:
        if format == 'text':
            write_response = reply.text
        elif format == 'json':
            write_response = json.dumps(reply, indent=1)
        else:
            write_response = etree.tostring(reply)
        with fopen(dest, 'w') as fp:
            fp.write(write_response)
    return ret
def rpc(cmd=None, dest=None, format='xml', *args, **kwargs):
    '''
    This function executes the rpc provided as arguments on the junos device.
    The returned data can be stored in a file whose destination can be
    specified with 'dest' keyword in the arguments.

    Usage:

    .. code-block:: bash

        salt 'device' junos.rpc 'get_config' 'text' filter='<configuration><system/></configuration>'

        salt 'device' junos.rpc 'get-interface-information' '/home/user/interface.log' interface_name='lo0' terse=True

    Options:
      * cmd: the rpc to be executed
      * dest: destination file where the rpc ouput is dumped
      * format: the format in which the rpc reply must be stored in file
        specified in the dest (used only when dest is specified)
      * args: other arguments as taken by rpc call of PyEZ
      * kwargs: keyworded arguments taken by rpc call of PyEZ
    '''
    conn = __proxy__['junos.conn']()
    ret = dict()
    ret['out'] = True

    # CLI calls deliver options via __pub_arg; direct calls via kwargs.
    op = dict()
    if '__pub_arg' in kwargs:
        if isinstance(kwargs['__pub_arg'][-1], dict):
            op.update(kwargs['__pub_arg'][-1])
    else:
        op.update(kwargs)

    if dest is None and format != 'xml':
        log.warning(
            'Format ignored as it is only used for output which is dumped in the file.')

    write_response = ''
    try:
        if cmd in ['get-config', 'get_config']:
            # get-config accepts an optional XML subtree filter.
            filter_reply = None
            if 'filter' in op:
                filter_reply = etree.XML(op['filter'])

            # Dashes in RPC names map to underscores on the PyEZ rpc object.
            xml_reply = getattr(
                conn.rpc, cmd.replace('-', '_'))(filter_reply, options=op)
            ret['message'] = jxmlease.parse(etree.tostring(xml_reply))
            write_response = etree.tostring(xml_reply)

            # Non-XML dest output requires a second RPC in the target format.
            if dest is not None and format != 'xml':
                op.update({'format': format})
                rpc_reply = getattr(
                    conn.rpc, cmd.replace('-', '_'))(filter_reply, options=op)
                if format == 'json':
                    write_response = json.dumps(rpc_reply, indent=1)
                else:
                    write_response = rpc_reply.text
        else:
            xml_reply = getattr(conn.rpc, cmd.replace('-', '_'))(**op)
            ret['message'] = jxmlease.parse(etree.tostring(xml_reply))
            write_response = etree.tostring(xml_reply)

            if dest is not None and format != 'xml':
                rpc_reply = getattr(
                    conn.rpc, cmd.replace('-',
                                          '_'))({'format': format}, **op)
                if format == 'json':
                    write_response = json.dumps(rpc_reply, indent=1)
                else:
                    write_response = rpc_reply.text
    except Exception as exception:
        ret['message'] = 'Execution failed due to "{0}"'.format(exception)
        ret['out'] = False

    # The file is written even on failure (write_response is then '').
    if dest is not None:
        with fopen(dest, 'w') as fp:
            fp.write(write_response)
    return ret