def __init__(self, **kwargs):
    """Initialize the file server description.

    Keyword Args:
        timeout: server timeout; defaults to TIMEOUT_DEFAULT.
        subnet: optional subnet used to derive the server IP address.
        name: optional server name (extracted, not kept in server_info).
        testbed: optional testbed object (extracted, not kept in server_info).
        synchro: optional multiprocessing manager used to reduce the number
            of file handles (extracted, not kept in server_info).

    All remaining keyword arguments are stored in ``self.server_info``.
    """
    self.server_info = {}
    self.timeout = kwargs.pop('timeout', TIMEOUT_DEFAULT)
    self.server_info.update(kwargs)
    # Try to determine the IP address from the user-provided subnet, falling
    # back to the pyATS configuration. Compute the subnet once so that a
    # 'subnet' key explicitly set to None cannot leak through to _get_ip()
    # (the old `.get('subnet', cfg_default)` form returned the None value
    # whenever the key was present, ignoring the cfg fallback).
    subnet = self.server_info.get('subnet') or cfg.get(FILETRANSFER_SUBNET_CFG)
    if subnet:
        # Get a specific ip from the subnet
        self.server_info['address'] = self._get_ip(subnet)
    # Extract name if provided
    self.name = None
    if 'name' in self.server_info:
        self.name = self.server_info.pop('name')
    # Extract testbed if provided
    self.testbed = None
    if 'testbed' in self.server_info:
        self.testbed = self.server_info.pop('testbed')
    # Extract synchro if provided to reduce number of file handles
    self.synchro = None
    if 'synchro' in self.server_info:
        self.synchro = self.server_info.pop('synchro')
def pre_job(self, job):
    """Job-start hook: collect WebEx credentials and decide whether the
    plugin is enabled.

    Reads the token/room from CLI arguments first, then from configuration.
    If either is missing, notification is disabled for this job.
    """
    self.shared_obj = self.runtime.synchro.dict()
    self.token = self.runtime.args.webex_token or cfg.get('webex.token')
    self.room = self.runtime.args.webex_room or cfg.get('webex.room')
    self.enabled = True
    # NOTE: do not log self.token itself — it is a credential and must not
    # appear in job logs (the previous logger.info(self.token) leaked it).
    # The log message says "or ... not found ... disabling", so disable when
    # EITHER value is missing (the old `not token and room` test only fired
    # when the token was missing but a room WAS configured, and never
    # actually flipped the enabled flag).
    if not self.token or not self.room:
        logger.info("SPARK_TOKEN or ROOM_ID not found in env, disabling")
        self.enabled = False
def _load_parser_json():
    '''Get all parser data from the genie.libs.parser json file.

    Returns:
        dict: parser data loaded from parsers.json, optionally merged with
        data from an external parser package (configured via the
        PYATS_EXT_PARSER cfg key or its environment-variable equivalent).
        Returns an empty dict when parsers.json cannot be located.
    '''
    try:
        mod = importlib.import_module('genie.libs.parser')
        parsers = os.path.join(mod.__path__[0], 'parsers.json')
    except Exception:
        # genie.libs.parser not importable; fall through to the warning below
        parsers = ''
    if not os.path.isfile(parsers):
        log.warning('parsers.json does not exist, make sure you '
                    'are running with latest version of '
                    'genie.libs.parsers')
        parser_data = {}
    else:
        # Open all the parsers in json file
        with open(parsers) as f:
            parser_data = json.load(f)
    # check if provided external parser packages
    ext_parser_package = cfg.get(PYATS_EXT_PARSER, None) or \
        os.environ.get(PYATS_EXT_PARSER.upper().replace('.', '_'))
    if ext_parser_package:
        ext = ExtendParsers(ext_parser_package)
        ext.extend()
        ext.output.pop('tokens', None)
        summary = ext.output.pop('extend_info', None)
        merge_dict(parser_data, ext.output, update=True)
        # Guard against a missing 'extend_info' key: summary is None in that
        # case and len(None) would raise TypeError.
        if summary:
            log.warning("External parser counts: {}\nSummary:\n{}".format(
                len(summary), json.dumps(summary, indent=2)))
    return parser_data
def __init__(self, **kwargs):
    """Initialize the file server description.

    Keyword Args:
        timeout: server timeout; defaults to TIMEOUT_DEFAULT.
        subnet: subnet used to derive the server IP address. If absent, the
            'address' value (treated as a /32 subnet) or the pyATS
            configuration is used instead.
        name: optional server name (extracted, not kept in server_info).
        testbed: optional testbed object (extracted, not kept in server_info).
        synchro: optional multiprocessing manager used to reduce the number
            of file handles (extracted, not kept in server_info).

    Raises:
        TypeError: when no subnet can be determined from any source.
    """
    self.server_info = {}
    self.timeout = kwargs.pop('timeout', TIMEOUT_DEFAULT)
    self.server_info.update(kwargs)
    if 'subnet' not in self.server_info:
        # No explicit subnet: reuse the given address as a /32 subnet when
        # present (key presence, not truthiness, decides), otherwise consult
        # the pyATS configuration.
        if 'address' in self.server_info:
            fallback = self.server_info['address']
        else:
            fallback = cfg.get(FILETRANSFER_SUBNET_CFG)
        self.server_info['subnet'] = fallback
    # A usable subnet is mandatory at this point
    if not self.server_info.get('subnet'):
        raise TypeError('FileServer missing subnet')
    # Resolve the concrete ip address from the subnet
    self.server_info['address'] = self._get_ip(self.server_info['subnet'])
    # Pull bookkeeping entries out of server_info so only connection
    # parameters remain in the dict.
    self.name = self.server_info.pop('name', None)
    self.testbed = self.server_info.pop('testbed', None)
    # synchro reduces the number of open file handles when provided
    self.synchro = self.server_info.pop('synchro', None)
def start_servers(testbed, log_dir, synchro=None):
    '''Find and start all dynamic file transfer servers in the testbed.

    Args:
        testbed: testbed object; its optional ``servers`` mapping is scanned
            for entries marked ``dynamic``.
        log_dir: directory where per-server log files are created.
        synchro: optional multiprocessing manager forwarded to each server.

    Returns:
        list: started FileServer instances (callers are responsible for
        stopping them).

    Raises:
        TypeError: when a dynamic server has no protocol and none is
        configured via FILETRANSFER_PROTOCOL_CFG.
    '''
    file_servers = []
    # servers may not exist on the testbed at all
    for name, server in getattr(testbed, 'servers', {}).items():
        if not server.get('dynamic'):
            continue
        # Make log dir if it doesn't already exist
        os.makedirs(log_dir, exist_ok=True)
        # Ensure dynamic server has a file copy protocol
        # (idiomatic membership test; was `not 'protocol' in server`)
        if 'protocol' not in server:
            protocol = cfg.get(FILETRANSFER_PROTOCOL_CFG)
            if not protocol:
                raise TypeError('Dynamic server %s missing "protocol"' % name)
            server['protocol'] = protocol
        # Set up log file for file transfer server; a custom logfile wins
        if 'logfile' in server.get('custom', {}):
            server['logfile'] = server['custom']['logfile']
        else:
            server['logfile'] = os.path.join(
                log_dir, '%s.%s.log' % (name, server['protocol']))
        # Add multiprocessing manager to server kwargs if passed
        if synchro:
            server['synchro'] = synchro
        # Create and start the file server (its context-manager entry starts
        # the underlying process)
        file_server = FileServer(testbed=testbed, name=name, **server)
        file_servers.append(file_server)
        file_server.__enter__()
    # Return list of started servers
    return file_servers
def post_job(self, job):
    """Job-end hook: send a WebEx Teams notification summarizing the job.

    Reads token/space/email from CLI arguments first, then configuration.
    Silently returns (with an info log) when the token, or both space and
    email, are missing. Network/HTTP failures are logged, never raised.
    """
    # Get WebEx info from arguments or configuration
    token = self.runtime.args.webex_token or cfg.get('webex.token')
    space = self.runtime.args.webex_space or cfg.get('webex.space')
    email = self.runtime.args.webex_email or cfg.get('webex.email')
    if not token:
        logger.info('WebEx Token not given as argument or in config. No '
                    'WebEx notification will be sent')
        return
    headers = {
        'Authorization': 'Bearer {}'.format(token),
        'Content-Type': 'application/json'
    }
    if not space and not email:
        logger.info('No Space ID or email specified, No WebEx Teams '
                    'notification will be sent')
        return
    # Format message with info from job run
    msg = MESSAGE_TEMPLATE.format(job=job)
    # internal Cisco pyATS log upload link
    # (does not exist for external release)
    try:
        # Attempt to get path for TRADe logs; runinfo attributes may be
        # absent, hence the AttributeError guard
        if not self.runtime.runinfo.no_upload:
            msg += '\n\nView pyATS logs at: %s' \
                % self.runtime.runinfo.log_url
    except AttributeError:
        pass
    try:
        host = job.runtime.env['host']['name']
        # Determine if liveview is running
        if self.runtime.args.liveview and \
                self.runtime.args.liveview_keepalive:
            # Liveview will set this to the assigned port if not specified
            port = self.runtime.args.liveview_port
            try:
                # Attempt to add a link using the host domain name; fall back
                # to the raw host name when the FQDN does not resolve
                addr = socket.getfqdn()
                socket.gethostbyname(addr)
                msg += '\n\nLogs can be viewed with the pyATS Log Viewer '\
                       'at: http://%s:%s' % (addr, port)
            except OSError:
                msg += '\n\nLogs can be viewed in your browser by '\
                       'connecting to %s with port %s' % (host, port)
        else:
            # Liveview not running: show command to view logs from the archive
            archive = self.runtime.runinfo.archive_file
            if archive:
                msg += '\n\nRun the following command on %s to view logs '\
                       'from this job: `pyats logs view %s --host 0.0.0.0`'\
                       % (host, archive)
    except AttributeError:
        # job/runtime attributes missing — skip the log-viewing hint entirely
        pass
    # Build payload; space takes precedence over email as the destination
    payload = {'markdown': msg}
    if space:
        payload['roomId'] = space
    elif email:
        payload['toPersonEmail'] = email
    logger.info('Sending WebEx Teams notification')
    try:
        # Attempt POST; best-effort, failures are logged but never raised
        r = requests.post(MESSAGE_URL,
                          data=json.dumps(payload), headers=headers)
        logger.debug('notification status: %s' % r.status_code)
        logger.debug(r.text)
    except Exception:
        logger.exception('Failed to send WebEx Teams notification:')
def pre_task(self, task):
    """Task-start hook: load the pyATS Health Check configuration and attach
    the health sections as global context processors to the task.

    Workflow: warn on deprecated arguments; bail out when no testbed or no
    health file/checks are given; load the health yaml (explicit file or
    default template); merge CLI-provided health settings; optionally set up
    WebEx notification data on the runtime; instantiate the configured
    health class and register one context processor per health section;
    finally archive the effective health yaml into the run directory.
    """
    # warnings for deprecated arguments
    if self.runtime.args.health_sections:
        logger.warning(
            'DeprecationWarning: --health-sections is deprecated in 21.6. Use --health-tc-sections'
        )
    if self.runtime.args.health_uids:
        logger.warning(
            'DeprecationWarning: --health-uids is deprecated in 21.6. Use --health-tc-uids'
        )
    if self.runtime.args.health_groups:
        logger.warning(
            'DeprecationWarning: --health-groups is deprecated in 21.6. Use --health-tc-groups'
        )
    if self.runtime.args.health_webex:
        logger.warning(
            'DeprecationWarning: --health-webex is deprecated in 21.7. Use --health-notify-webex'
        )
    # after loading health file, all the sections/actions in health yaml
    # will be added as global context processors to pyATS job.
    # In `health.py`, `health_dispatcher` is the code of context processor.
    # It's same concept with generator-style context-processors in pyATS.
    # the code before `yield` is pre-processor, after `yield`, it's post-processor
    #
    # reference in pyATS doc : https://pubhub.devnetcloud.com/media/pyats/docs/aetest/processors.html#context-processors

    # Skip if no testbed or no health_file
    # NOTE(review): module-level `runtime` and `self.runtime` are both used
    # in this function — presumably the same object; confirm before relying
    # on either exclusively.
    if not runtime.testbed:
        if self.runtime.args.health_file:
            # show message when testbed yaml only is missed
            logger.warning(
                'testbed yaml was not given, so pyATS health will not run')
        return
    # check if any health arguments are given without --health-file or --health-checks
    if not self.runtime.args.health_file and not self.runtime.args.health_checks:
        webex_args_list = [
            x for x in dir(self.runtime.args) if 'health' in x
        ]
        webex_args_value_list = []
        for arg in webex_args_list:
            webex_args_value_list.append(getattr(self.runtime.args, arg))
        if any(webex_args_value_list):
            # health args given but no health file/checks — hard error
            raise Exception(
                'pyATS Health Check arguments were given, but mandatory --health-file or --health-checks is missed.'
            )
        else:
            # no health involvement at all for this task
            return
    logger.info('Pre-Task %s: pyATS Health' % task.taskid)
    # load health configuration (explicit --health-config or bundled default)
    with open(self.runtime.args.health_config
              or health_yamls.health_config) as f:
        health_config = yaml.safe_load(f)
    if health_config:
        # shared (cross-process) dict collecting health results for the run
        runtime.health_results = runtime.synchro.dict()
        runtime.health_results.update(health_config)
        runtime.health_results['health_data'] = []
    # convert from pyATS testbed to Genie testbed
    tb = testbed.load(runtime.testbed)
    # loader resolves datafile markup against the Genie testbed
    loader = TriggerdatafileLoader(testbed=tb)
    # load pyats health file
    health_loaded = None
    # load via --health-file
    if self.runtime.args.health_file:
        health_loaded = loader.load(self.runtime.args.health_file)
    # load default template via --health-checks (only when no explicit file)
    if self.runtime.args.health_checks and not health_loaded:
        with open(health_yamls.pyats_health_yaml) as f:
            health_loaded = loader.load(f.read())
    # get `source` for pyATS Health processors and instantiate class
    source = health_loaded.get('pyats_health_processors',
                               {}).get('source', {})
    # check `reconnect` flag/parameters
    if 'reconnect' in health_loaded.setdefault('pyats_health_processors',
                                               {}):
        reconnect = health_loaded['pyats_health_processors']['reconnect']
        if reconnect is None:
            # `reconnect` key present in yaml, but no params: pass empty dict
            reconnect = {}
    else:
        reconnect = None
    # check `health_settings` flag/parameters from health yaml
    if 'health_settings' in health_loaded.setdefault(
            'pyats_health_processors', {}):
        health_settings = health_loaded['pyats_health_processors'][
            'health_settings']
        if health_settings is None:
            # `health_settings` key present in yaml, but no params:
            # pass empty dict
            health_settings = {}
    else:
        health_settings = {}

    def _evaluate_arguments(arg, variable, setting_name, exception_msg):
        # Parse a CLI health argument into `variable[setting_name]`.
        # Accepts: a bool flag, a list (nargs) or single string of
        # space/comma-separated `key:value` pairs, or bare values for the
        # 'checks'/'devices' settings. Raises Exception(exception_msg) on
        # malformed input.
        checks_list = []
        devices_list = []
        if isinstance(arg, bool):
            variable[setting_name] = arg
            return
        # support both nargs and non-nargs
        for each_arg in arg if isinstance(arg, list) else [arg]:
            # support delimiter ',' instead of ' '(space) as well;
            # the lookahead keeps commas inside [...] intact
            for item in re.split(r',(?![^\[]*[\]])', each_arg):
                if len(re.split(r':(?![^\[]*[\]])', item)) != 2:
                    # --health-checks: bare value allowed
                    if setting_name == 'checks':
                        checks_list.append(item)
                        continue
                    # --health-devices: bare value allowed
                    elif setting_name == 'devices':
                        devices_list.append(item)
                        continue
                    else:
                        raise Exception(exception_msg)
                k, v = re.split(r':(?![^\[]*[\]])', item)
                if not k or not v:
                    # error out in case improper format given like `nxos:` or `nxos`
                    # which is not key and value pair
                    raise Exception(exception_msg)
                try:
                    # change string to appropriate type
                    v = ast.literal_eval(v)
                except Exception:
                    # keep the raw string if it is not a Python literal
                    pass
                variable.setdefault(setting_name, {})[k] = v
        # single value to pair. eg. cpu -> cpu: True
        if checks_list:
            for check in variable.get('checks', []):
                variable['checks'][check] = check in checks_list
        # single value to pair. eg. R3_nx -> R3_nx: nxos
        if devices_list:
            for device in devices_list:
                variable.setdefault(
                    'devices',
                    {})[device] = runtime.testbed.devices[device].os

    # overwrite health_settings if health args are given to pyats command
    if self.runtime.args.health_remote_device:
        _evaluate_arguments(
            self.runtime.args.health_remote_device,
            health_settings,
            'remote_device',
            exception_msg=
            'Wrong format was given to `--health-remote-device`. Format would be `name:jump_host path:/tmp via:scp`.'
        )
    if self.runtime.args.health_mgmt_vrf:
        _evaluate_arguments(
            self.runtime.args.health_mgmt_vrf,
            health_settings,
            'mgmt_vrf',
            exception_msg=
            'Wrong format was given to `--health-mgmt-vrf`. Format would be `iosxe:Mgmt-intf iosxr:None,nxos:management`.'
        )
    if self.runtime.args.health_threshold:
        _evaluate_arguments(
            self.runtime.args.health_threshold,
            health_settings,
            'threshold',
            exception_msg=
            'Wrong format was given to `--health-threshold`. Format would be `cpu:90 memory:90`.'
        )
    if self.runtime.args.health_show_logging_keywords:
        _evaluate_arguments(
            self.runtime.args.health_show_logging_keywords,
            health_settings,
            'show_logging_keywords',
            exception_msg=
            "Wrong format was given to `--health-show-logging-keywords`. Format would be `\"iosxe:['traceback','Traceback']\" \"iosxr:['TRACEBACK']\"`."
        )
    # unconditional: clear_logging is a boolean flag, so _evaluate_arguments
    # records its value (True or False) either way
    _evaluate_arguments(
        self.runtime.args.health_clear_logging,
        health_settings,
        'clear_logging',
        exception_msg=
        "Wrong format was given to `--clear-logging`. No value required. This is flag. If provide, True(clear logging)."
    )
    if self.runtime.args.health_core_default_dir:
        _evaluate_arguments(
            self.runtime.args.health_core_default_dir,
            health_settings,
            'core_default_dir',
            exception_msg=
            "Wrong format was given to `--health-core-default-dir`. Format would be `\"iosxe:['bootflash:/core/'','harddisk:/core/']\" \"iosxr:['/misc/scratch/core']\"`."
        )
    if self.runtime.args.health_checks:
        _evaluate_arguments(
            self.runtime.args.health_checks,
            health_settings,
            'checks',
            exception_msg=
            "Wrong format was given to `--health-checks`. Format would be `cpu memory`."
        )
    if self.runtime.args.health_devices:
        _evaluate_arguments(
            self.runtime.args.health_devices,
            health_settings,
            'devices',
            exception_msg=
            "Wrong format was given to R1_xe:iosxe,R2_xr:iosxr,R3_nx:nxos`."
            if False else
            "Wrong format was given to `--health-devices`. Format would be R1_xe:iosxe,R2_xr:iosxr,R3_nx:nxos`."
        )
    # DEPRECATED. keep for backward compatibility; both flags land in the
    # same 'webex' setting (the later call wins when both are given)
    _evaluate_arguments(
        self.runtime.args.health_webex,
        health_settings,
        'webex',
        exception_msg=
        "(DEPRECATED. PLEASE USE --health-notify-webex which is equivalent)"
    )
    _evaluate_arguments(
        self.runtime.args.health_notify_webex,
        health_settings,
        'webex',
        exception_msg=
        "Wrong format was given to `--health-webex`. No value required. This is flag. If provide, True(webex notification enabled)."
    )
    if self.runtime.args.health_notify_webex or self.runtime.args.health_webex:
        # collect WebEx notification data on the runtime for later use
        runtime.health_webex = {}
        runtime.health_webex[
            'token'] = self.runtime.args.webex_token or cfg.get(
                'webex.token')
        runtime.health_webex[
            'space'] = self.runtime.args.webex_space or cfg.get(
                'webex.space')
        runtime.health_webex[
            'email'] = self.runtime.args.webex_email or cfg.get(
                'webex.email')
        if not runtime.health_webex['token']:
            raise Exception(
                'WebEx Token not given as argument or in config. No '
                'WebEx notification will be sent')
        runtime.health_webex['headers'] = {
            'Authorization':
            'Bearer {}'.format(runtime.health_webex['token']),
            'Content-Type': 'application/json'
        }
        if not runtime.health_webex['space'] and not runtime.health_webex[
                'email']:
            raise Exception(
                'No Space ID or email specified, No WebEx Teams '
                'notification will be sent')
        # webex notification template/url
        runtime.health_webex['msg'] = MESSAGE_TEMPLATE
        runtime.health_webex['url'] = MESSAGE_URL
    if not source:
        # Block testcase when error is found
        raise Exception(
            "Couldn't find 'pyats_health_processors' section in health.yaml."
        )
    # get class name of testcase in health yaml
    pkg_name = source.get('pkg', '')
    class_name = source.get('class', '')
    class_path_list = '.'.join([pkg_name, class_name]).split('.')
    module = importlib.import_module('.'.join(class_path_list[:-1]))
    class_ = getattr(module, class_path_list[-1])
    # instantiate Health class which inherited from Blitz class
    # `health_dispatcher` function from Health class will be used as processor
    health = class_()
    # get section names for pyATS Health processors
    section_names = Dq(health_loaded).get_values('test_sections')
    if not section_names:
        # Block testcase when error is found
        raise Exception("Couldn't find any 'test_sections'.")
    processors = task.kwargs.setdefault('processors', {})
    # Try to add health section usage to telemetry data (best-effort)
    if INTERNAL:
        try:
            add_health_usage_data(section_names)
        except Exception as e:
            logger.debug("Encountered an unexpected error while adding "
                         "health telemetry data: %s" % e)
    # loop by health items (sections): register one context processor per
    # section, parameterized with its data and the reconnect/health settings
    for section in section_names:
        for section_name, section_data in section.items():
            # add processors to pyATS
            processor_decorator = ProcessorDecorator()
            processor_method = processor_decorator.context(
                func=health.health_dispatcher,
                name='pyATS Health Check {section_name}'.format(
                    section_name=section_name))
            processor = functools.partial(
                processor_method,
                # enable processor report
                report=True,
                # params for health dispatcher
                parameters={
                    'reconnect': reconnect,
                    'health_settings': health_settings,
                    'name': section_name,
                    'data': section_data
                })
            processors.setdefault('context', []).append(processor)
    # save `pyats_health.yaml` to runtime.directory for archive
    with open(
            "{rundir}/pyats_health.yaml".format(
                rundir=self.runtime.directory), 'w') as f:
        yaml.dump(health_loaded, f,
                  Dumper=OrderedSafeDumper,
                  default_flow_style=False)