def main():
    """Connect to every device in the testbed and gather OSPF data in parallel."""
    # Load the testbed definition from disk
    tb = load('testbed.yaml')
    # Connect quietly -- suppress per-device console output
    tb.connect(log_stdout=False)
    # Fan get_ospf out across all devices concurrently
    pcall(get_ospf, hostname=tb.devices, dev=tb.devices.values())
def get_neigbor_data(self):
    '''Collect CDP and LLDP neighbor data from every unvisited device.

    Marks every device in the testbed as visited, then runs the
    ``get_neighbor_info`` worker in parallel (via ``pcall``) on the
    devices that are connected and run a supported OS.

    Returns:
        list: [{device: {'cdp': DATA, 'lldp': data}, ...}] as produced by
        ``get_neighbor_info``, or an empty list when no device is left
        to test.
    '''
    dev_to_test = []
    # If the device has not been visited, mark it visited and queue it
    # for testing when it is connected and its OS is supported.
    for device_name, device_obj in self.testbed.devices.items():
        if device_name in self.visited_devices:
            continue
        self.visited_devices.add(device_name)
        if device_obj.os in self.supported_os and device_obj.connected:
            # device_obj is the very object stored under device_name,
            # so no need to re-index self.testbed.devices
            dev_to_test.append(device_obj)
    # Use pcall to get cdp and lldp information for all queued devices
    if dev_to_test:
        return pcall(self.get_neighbor_info, device=dev_to_test)
    return []
def parallel(self, steps, testbed, section, name, data):
    """ When called run all the actions below the keyword parallel concurrently """
    pcall_payloads = []
    pcall_returns = []
    with steps.start('Executing actions in parallel', continue_=True) as steps:

        # Everything the generator needs to expand the parallel block
        # into one dispatcher-kwargs dict per action
        kwargs = {
            'self': self,
            'steps': steps,
            'testbed': testbed,
            'section': section,
            'data': data,
            'parallel': True
        }

        # call generator and store all action kwargs into pcall_payload
        for action_kwargs in callback_blitz_dispatcher_gen(**kwargs):
            pcall_payloads.append(action_kwargs)

        # Run actions in parallel (one child process per payload)
        pcall_returns = pcall(self.dispatcher, ikwargs=pcall_payloads)

        # Merge the child-process results (saved variables, step results)
        # back into this parent process
        _parallel_results = _parallel(self, section, pcall_returns, steps)

        # Check for `continue: False` and go to exit if a section doesn't pass
        # NOTE(review): entries missing a 'continue_' key yield None, which
        # also fails all() -- confirm that is the intended behavior.
        continues = [entry.get('continue_') for entry in pcall_returns]

        if not all(continues) and section.result != Passed:
            section.failed(
                'Parallel section results is NOT passed, Stopping the testcase',
                goto=['exit'])

        return _parallel_results
def action_parallel(self, steps, testbed, data):
    # When called run all the actions
    # below the keyword parallel concurrently
    pcall_payloads = []
    with steps.start('Executing actions in parallel', continue_=True) as steps:
        for action_item in data:
            for action, action_kwargs in action_item.items():
                # for future use - Enhancement needed in pyATS
                # with steps.start("Implementing action '{a}' in parallel".format(a=actions)) as step:
                # Fresh Steps() so each child process reports into its own object
                step = Steps()
                kwargs = {
                    'steps': step,
                    'testbed': testbed,
                    'data': [{
                        action: action_kwargs
                    }]
                }
                pcall_payloads.append(kwargs)

        # Dispatch all queued actions concurrently
        pcall_returns = pcall(self.dispatcher, ikwargs=pcall_payloads)

        # Each action return is a dictionary containing the action name, possible saved_variable
        # Action results, and device name that action is being implemented on.
        # These values would be lost when the child process that executes the action ends,
        # so they are replayed here to add them to the main process.
        for each_return in pcall_returns:

            if each_return.get('saved_var'):
                # saved_var is a (data, name) pair produced by the child
                saved_var_data = each_return['saved_var'][0]
                saved_var_name = each_return['saved_var'][1]
                save_variable(self, saved_var_data, saved_var_name)

            with steps.start('Executed {} on {} in parallel'.format(
                    each_return['action'], each_return['device']),
                    continue_=True) as report_step:
                # Replay the child's step result (passed/failed/...) on the parent
                getattr(report_step, str(each_return['step_result']))()
def save_bootvar(self, testbed): """Check boot information and save bootvar to startup-config Args: testbed (`obj`): Testbed object Returns: None Raises: pyATS Results """ # Create Summary summary = Summary(title='Summary', width=90) devices = [] for dev in self.parent.mapping_data['devices']: device = testbed.devices[dev] if device.type in EXCLUDED_DEVICE_TYPES: msg = " - This subsection is not supported for 'TGN' devices" summarize(summary, message=msg, device=dev) continue devices.append(device) device_dict = {} failed = False # We don't catch exceptions since failures will lead to passx in that # CommonSetup subsection asynchronous_boot_var_output = pcall(asynchronous_save_boot_variable, ckwargs={ 'self': self, 'device_dict': device_dict }, device=tuple(devices)) for item in asynchronous_boot_var_output: for dev, res in item.items(): if res == 'Failed': failed = True msg = " - Failed to save boot variable or copy "\ "running-config to startup-config" summarize(summary, message=msg, device=dev) elif res == 'Skipped': msg = " - Skipped saving boot variable or copy "\ "running-config to startup-config" summarize(summary, message=msg, device=dev) else: msg = " - Successfully saved boot variable" summarize(summary, message=msg, device=dev) summary.print() if failed: self.passx("Issue while saving boot variable on one of the devices, " "Check section summary for more details")
def configure_configs_in_parallel_on_device(self, config=None, devices=None):
    """Apply the same configuration to several devices concurrently.

    Args:
        config: configuration to apply on each device.
        devices: ';'-separated list of device names, e.g. "R1;R2".

    Returns:
        The aggregated pcall result.
    """
    device_names = [name.strip() for name in devices.split(';')]
    # One copy of the config per device, zipped by pcall with device_names
    configs = [config] * len(device_names)
    return pcall(self.configure_config_on_device_alias,
                 config=configs,
                 device=device_names)
def action_parallel(self, steps, testbed, data):
    """Run every action listed under the ``parallel`` keyword concurrently.

    Args:
        steps: parent Steps object of the section (left untouched).
        testbed: testbed object forwarded to each dispatched action.
        data: list of {action_name: action_kwargs} mappings.
    """
    pcall_payloads = []
    for action_item in data:
        for action, action_kwargs in action_item.items():
            # for future use - Enhancement needed in pyATS
            # with steps.start("Implementing action '{a}' in parallel".format(a=actions)) as step:
            # BUGFIX: use a fresh local `step` instead of rebinding the
            # `steps` parameter -- the original shadowed it, losing the
            # caller's Steps object after the first iteration.
            step = Steps()
            kwargs = {
                'steps': step,
                'testbed': testbed,
                'data': [{
                    action: action_kwargs
                }]
            }
            pcall_payloads.append(kwargs)
    # Dispatch all queued actions concurrently
    pcall(self.dispatcher, ikwargs=pcall_payloads)
def execute_commands_in_parallel_on_device(self, command=None, devices=None):
    """Run one command on multiple devices at the same time.

    Args:
        command: CLI command to execute on each device.
        devices: ';'-separated string of device names, e.g. "R1;R2".

    Returns:
        The aggregated pcall result.
    """
    targets = [name.strip() for name in devices.split(';')]
    # Repeat the command once per target so pcall can zip them together
    commands = [command] * len(targets)
    return pcall(self.execute_command_on_device,
                 command=commands,
                 device=targets)
def go_to_rommon(self, steps, device, save_system_config=SAVE_SYSTEM_CONFIG):
    """Bring the device down to the rommon> prompt in preparation for TFTP boot.

    Args:
        steps: Steps object used for step reporting.
        device: device to reload into rommon.
        save_system_config (bool): answer 'yes' to the reload "Save?" prompt
            when the running configuration was modified.
    """
    with steps.start("Bring device {} down to rommon> prompt prior to TFTP boot". \
        format(device.name)) as step:

        # Automatically answer the interactive reload prompts
        reload_dialog = Dialog([
            Statement(pattern=r".*System configuration has been modified\. Save\? \[yes\/no\].*",
                      action='sendline(yes)' if save_system_config else 'sendline(no)',
                      loop_continue=True,
                      continue_timer=False),
            Statement(pattern=r".*Proceed with reload\? \[confirm\].*",
                      action='sendline()',
                      loop_continue=False,
                      continue_timer=False),
        ])

        # Using sendline, as we dont want unicon boot to kick in and send "boot"
        # to the device. Cannot use device.reload() directly as in case of HA,
        # we need both sup to do the commands
        device.sendline('reload')
        reload_dialog.process(device.spawn)

        if device.is_ha:
            def reload_check(device, target):
                # Wait for the reload banner or a rommon/switch prompt on the
                # given connection
                device.expect(['(.*Initializing Hardware.*|^(.*)((rommon(.*))+>|switch *:).*$)'],
                              target=target, timeout=90)

            # check if device is a stack device (a stack with 2 members is
            # similar to HA devices)
            if len(device.subconnections) > 2:
                pcall(reload_check,
                      cargs=(device,),
                      iargs=[[alias] for alias in device.connections.defaults.connections])
            else:
                pcall(reload_check,
                      ckwargs={'device': device},
                      ikwargs=[{'target': 'active'}, {'target': 'standby'}])
        else:
            device.expect(['(.*Initializing Hardware.*|^(.*)((rommon(.*))+>|switch *:).*$)'],
                          timeout=60)

        log.info("Device is reloading")
        # Drop all connections; the state machine is stale after a reload
        device.destroy_all()
def _actions_execute_in_loop_in_parallel(self, section, payload, ret_list, steps): if not payload: return try: pcall_returns = pcall(self.dispatcher, ikwargs=payload) except Exception: steps.errored("Unable to execute actions concurrently") ret_list.append(_parallel(self, section, pcall_returns, steps))
def device_parallel(self, steps, testbed, data):
    """Run the device blocks listed under the ``parallel`` keyword concurrently.

    Args:
        steps: parent Steps object of the section (left untouched).
        testbed: testbed object forwarded to each dispatched run.
        data: section data; its 'parallel' key maps keys to device blocks.
    """
    parallel_dict = data.pop('parallel', None)
    pcall_payloads = []
    for key, value in parallel_dict.items():
        for item in value:
            # Each parallel entry runs against its own deep copy of the
            # remaining section data, with only this device block under `key`
            setup_dict = copy.deepcopy(data)
            setup_dict[key] = [item]
            for dev in item.keys():
                # for future use
                # with steps.start("Running devices '{d}' in parallel".format(d=dev)) as step:
                # BUGFIX: use a fresh local `step` instead of rebinding the
                # `steps` parameter -- the original shadowed it, losing the
                # caller's Steps object after the first iteration.
                step = Steps()
                kwargs = {
                    'steps': step,
                    'testbed': testbed,
                    'data': setup_dict
                }
                pcall_payloads.append(kwargs)
    # Dispatch all device runs concurrently
    pcall(self.dispatcher, ikwargs=pcall_payloads)
def parallel(self, steps, testbed, section, name, data):
    # When called run all the actions
    # below the keyword parallel concurrently
    pcall_payloads = []
    pcall_returns = []
    with steps.start('Executing actions in parallel', continue_=True) as steps:

        # If parallel keyword is inside a condition block
        # we might want to apply a function if the condition is met;
        # this if condition does that
        if 'parallel_conditioned' in data[0]:
            run_condition = data[0]['parallel_conditioned']
            if run_condition['if']:
                # Condition met: apply the configured step function
                # (passed/failed/skipped/...) to the whole parallel block
                getattr(
                    steps, run_condition['function']
                )('Condition is met the parallel block step result is set to {f}'
                  .format(run_condition['function']))
            else:
                # Condition not met: drop the marker and run the actions
                data.pop(0)

        for action_item in data:
            for action, action_kwargs in action_item.items():
                # for future use - Enhancement needed in pyATS
                # with steps.start("Implementing action '{a}' in parallel".format(a=actions)) as step:
                # on parallel it is not possible to set continue to False and benefit from that feature
                step = Steps()

                # Making sure that if a device is connected just the name of the device gets passed by to dispatcher
                if 'device' in action_kwargs:
                    if not isinstance(
                            action_kwargs['device'],
                            str) and action_kwargs['device'].connected:
                        action_kwargs['device'] = action_kwargs['device'].name
                    elif action_kwargs['device'] not in testbed.devices:
                        # Device name is not a known device: treat it as a
                        # markup variable and resolve it
                        action_kwargs.update({'self': self})
                        action_kwargs = get_variable(**action_kwargs)
                        action_kwargs.pop('self')

                kwargs = {
                    'steps': step,
                    'testbed': testbed,
                    'section': section,
                    'name': name,
                    'data': [{
                        action: action_kwargs
                    }]
                }
                pcall_payloads.append(kwargs)

        # Dispatch all queued actions concurrently
        pcall_returns = pcall(self.dispatcher, ikwargs=pcall_payloads)

        # NOTE(review): sibling variants call _parallel(self, section,
        # pcall_returns, steps); confirm this 3-argument call matches the
        # local _parallel helper's signature.
        return _parallel(self, pcall_returns, steps)
def main(testbed, log):
    """Update interface descriptions on every device and report the runtime."""
    genie_testbed = get_testbed(testbed)
    client = GenieClient(genie_testbed, log=log)
    # Time the parallel update across all devices
    started = datetime.now()
    pcall(update_interface_desc, device=client.devices.values())
    elapsed = datetime.now() - started
    print(
        'Script took {} to update the device\'s interfaces'.format(elapsed))
def save_device_config(self):
    '''Save each testbed device's running config to its startup config.

    Runs "write memory" on every device concurrently via pcall.
    '''
    def write_mem(device):
        # Worker executed once per device by pcall; `device` is a single
        # device object (the original misleadingly called it `devices`)
        print(f'Running commands on {device}...')
        cmd = "write memory"
        output = device.execute(cmd)
        print(f'\nOutput of {cmd} on {device}\n' + output + '\n')
    # Using pcall to run the above function in parallel; the return value
    # was previously bound to an unused variable
    pcall(write_mem, device=self.devices.values())
def action_parallel(self, steps, testbed, section, data):
    # When called run all the actions
    # below the keyword parallel concurrently
    pcall_payloads = []
    with steps.start('Executing actions in parallel', continue_=True) as steps:
        for action_item in data:
            for action, action_kwargs in action_item.items():
                # for future use - Enhancement needed in pyATS
                # with steps.start("Implementing action '{a}' in parallel".format(a=actions)) as step:
                # on parallel it is not possible to set continue to False and benefit from that feature
                step = Steps()
                kwargs = {
                    'steps': step,
                    'testbed': testbed,
                    'section': section,
                    'data': [{
                        action: action_kwargs
                    }]
                }
                pcall_payloads.append(kwargs)

        # Dispatch all queued actions concurrently
        pcall_returns = pcall(self.dispatcher, ikwargs=pcall_payloads)

        # Each action return is a dictionary containing the action name, possible saved_variable
        # Action results, and device name that action is being implemented on.
        # These values would be lost when the child process that executes the action ends,
        # so they are replayed here to add them to the main process.
        for each_return in pcall_returns:

            if each_return.get('saved_vars'):
                for saved_var_name, saved_var_data in each_return.get(
                        'saved_vars').items():

                    if each_return.get('filters'):
                        # NOTE(review): `action` here is whatever the payload
                        # loop above left bound last -- confirm the intended
                        # action name is being logged.
                        log.info('Applied filter: {} to the action {} output'.
                                 format(each_return['filters'], action))

                    save_variable(self, saved_var_data, saved_var_name)

            # Device-less actions (e.g. sleep) have no 'device' entry to report
            if each_return['device']:
                msg = 'Executed action {action} on {device} in parallel'.format(
                    action=each_return['action'],
                    device=each_return['device'])
            else:
                msg = 'Executed action {action} in parallel'.format(
                    action=each_return['action'])

            with steps.start(
                    msg,
                    continue_=True,
                    description=each_return['description']) as report_step:

                log.info('Check above for detailed action report')
                # Replay the child's step result (passed/failed/...) on the parent
                getattr(report_step, str(each_return['step_result']))()
def tftp_boot(self, steps, device, ip_address, subnet_mask, gateway,
              tftp_server, image, timeout=TIMEOUT,
              recovery_password=RECOVERY_PASSWORD,
              recovery_username=RECOVERY_USERNAME, recovery_en_pasword=None):
    """TFTP boot the device via the recovery worker, one child per console.

    Args:
        steps: Steps object used for step reporting.
        device: device to TFTP boot.
        ip_address: management ip address(es) to reach the tftp server.
        subnet_mask: management subnet mask.
        gateway: management gateway.
        tftp_server: tftp server reachable from the management interface.
        image: image to boot with.
        timeout (int): max time for the TFTP boot to complete.
        recovery_password / recovery_username / recovery_en_pasword:
            credentials applied after bootup (parameter name kept as-is
            for interface compatibility).
    """
    with steps.start("Begin TFTP boot of device {}".format(device.name)) as step:

        # Need to instantiate to get the device.start
        # The device.start only works because of a|b
        device.instantiate(connection_timeout=timeout)

        tftp_boot = {'ip_address': ip_address,
                     'subnet_mask': subnet_mask,
                     'gateway': gateway,
                     'tftp_server': tftp_server,
                     'image': image}
        try:
            abstract = Lookup.from_device(device, packages={'clean': clean})
            # Item is needed to be able to know in which parallel child
            # device.start only gets filled with single rp devices
            # for multiple rp devices we need to use subconnections
            if device.is_ha and hasattr(device, 'subconnections'):
                start = [i.start[0] for i in device.subconnections]
            else:
                start = device.start

            result = pcall(abstract.clean.recovery.recovery.recovery_worker,
                           start=start,
                           ikwargs=[{'item': i} for i, _ in enumerate(start)],
                           ckwargs= \
                                {'device': device,
                                 'timeout': timeout,
                                 'tftp_boot': tftp_boot,
                                 'break_count': 0,
                                 # Irrelevant as we will not use this pattern anyway
                                 # But needed for the recovery
                                 'console_activity_pattern': '\\.\\.\\.\\.',
                                 'golden_image': None,
                                 'recovery_username': recovery_username,
                                 'recovery_en_pasword': recovery_en_pasword,
                                 'recovery_password': recovery_password})
        except Exception as e:
            log.error(str(e))
            step.failed("Failed to TFTP boot the device '{}'". \
                        format(device.name), )
        else:
            log.info("Successfully performed TFTP boot on device '{}'". \
                     format(device.name))
def main():
    """Build the testbed dynamically, then back up every device config in parallel."""
    started = datetime.now()
    # Inventory comes from the dynamic_testbed helper
    inventory = create_device_inventory()
    # pprint.pprint(inventory)
    testbed = load(inventory)
    client = GenieClient(testbed, log=False)
    # Make sure the destination directory for the config files exists
    create_config_dir()
    # Save each device's configuration concurrently
    pcall(
        save_config_to_file,
        device=client.devices.values(),
    )
    elapsed = datetime.now() - started
    print('Script took {} to complete'.format(elapsed))
def verify_ntp_time(device, target, max_time=90, check_interval=15):
    """ Verify ntp clock is same on two devices

        Args:
            device (`obj`): Device object
            target (`obj`): Device object
            max_time (int): Maximum wait time for the trigger,
                            in second. Default: 90
            check_interval (int): Wait time between iterations when looping is needed,
                            in second. Default: 15

        Returns:
            result (`bool`): Verified result
    """
    def clock(dev):
        # Parsed "show clock"; run via pcall so both devices are sampled
        # at (nearly) the same instant
        return dev.parse("show clock")

    # hh:mm:ss extractor -- compiled once instead of on every poll iteration
    time_pattern = re.compile(r"\d+:\d+:\d+")

    timeout = Timeout(max_time, check_interval)
    while timeout.iterate():
        t1, t2 = pcall(clock, dev=[device, target])
        log.info("Device: {device} Clock: {t1}\n"
                 "Device: {target} Clock: {t2}".format(device=device.name,
                                                       target=target.name,
                                                       t1=t1,
                                                       t2=t2))
        for key in t1:
            if "time" == key:
                # Allow at most one second of skew between the two clocks
                a = time_to_int(time_pattern.search(t1[key]).group())
                b = time_to_int(time_pattern.search(t2[key]).group())
                result = abs(a - b) <= 1
            else:
                result = t1[key] == t2[key]
            if not result:
                break
        else:
            # Every key matched (for/else: loop completed without break)
            return True
        timeout.sleep()
    return False
def saving_config(self, steps, testbed, exclude_config_that_matches_regex):
    """Gather the running config from every testbed device in parallel and
    save each one to a local ``config_backup_<device>.txt`` file.

    Args:
        steps: Steps object used for step reporting.
        testbed: testbed whose devices are backed up.
        exclude_config_that_matches_regex: regex of config lines to exclude,
            forwarded to ``self.get_config``.
    """
    with steps.start("Gathering the running configuration from all "
                     "devices in the testbed file") as step:
        # Get all devices in the testbed
        devices = list(testbed.devices.values())
        try:
            # Use pcall to get configuration from all devices in parallel
            configurations = pcall(
                self.get_config,
                dev=devices,
                ckwargs={'exclude': exclude_config_that_matches_regex})
        except Exception as e:
            step.failed("Issue occurred while gathering running "
                        "configuration.\nError: {}".format(str(e)))
        # Only devices whose config came back as a string are reported;
        # others carry an exception object instead (handled below)
        step.passed("Gathered the running configuration for: {}".format([
            name for name, config in configurations
            if isinstance(config, str)
        ]))

    with steps.start("Saving all running configurations that were "
                     "gathered"):
        # Each pcall result is a (device_name, config-or-AttributeError) pair
        for dev, config in configurations:
            # Skip any devices where the api to gather the running
            # config does not exist
            if isinstance(config, AttributeError):
                log.warning(config)
                continue
            filename = 'config_backup_{}.txt'.format(dev)
            with open(filename, 'w') as f:
                f.write(config)
            log.info(
                "Saved '{dev}' running configuration to '{file}'".format(
                    dev=dev, file=filename))
def configure_testbed_lldp_protocol(self): ''' Method checks if lldp configuration is necessary for all devices in the testbed and if needed calls the cdp configuration method for the target devices in parallel ''' # Check which device to configure lldp on device_to_configure = [] for device_name, device_obj in self.testbed.devices.items(): if device_name in self.visited_devices or device_name in self.lldp_configured or not device_obj.connected or device_obj.os not in self.supported_os: continue device_to_configure.append(device_obj) # No device to configure if not device_to_configure: return # Configure lldp on these device res = pcall(self.configure_device_lldp_protocol, device= device_to_configure) for result in res: if result[1]: self.lldp_configured.add(result[0])
def pre_job(self, task):
    '''Try to connect to all the topology devices in parallel and make sure
       they are up and running before executing the test script.
    '''
    # Check for the argument controlling the plugin run (Checking devices)
    check_devices_up = self.runtime.args.all_devices_up
    if not check_devices_up:
        log.info("Checking all devices are up and ready is disabled, '--check-all-devices-up' "
                 "must be set to True in case of pyats runs or '-check_all_devices_up' set to "
                 "True in case of legacy easypy runs")
        return
    else:
        log.info("TopologyUp Plugin is enabled, will start the plugin checking for all "
                 "the devices' connectivity!")

    # Set the timers
    start_time = time()
    timeout = self.runtime.args.connection_check_timeout
    interval = self.runtime.args.connection_check_interval
    log.info("Connectivity check timeout is '{timeout}' and "
             "connectivity check interval is '{interval}'".format(timeout=timeout,
                                                                  interval=interval))

    # Trying to connect to all devices in parallel
    pcall_output = pcall(device_connect,
                         ckwargs={'start_time': start_time,
                                  'timeout': timeout,
                                  'interval': interval},
                         ikwargs=[{'device': self.runtime.testbed.devices[dev]}
                                  for dev in self.runtime.testbed.devices])

    # BUGFIX: check every device's result. The old check
    # `pcall_output[0] and pcall_output[1]` hard-coded exactly two devices:
    # it raised IndexError with one device and silently ignored results
    # beyond the second.
    if not all(pcall_output):
        # Terminate testscript
        raise Exception("Not all the testbed devices are up and ready")
    else:
        log.info("All devices are up and ready, Connected succesfully!")
        return
def parallel(self, steps, testbed, section, name, data):
    """ When called run all the actions below the keyword parallel concurrently """
    pcall_payloads = []
    pcall_returns = []
    with steps.start('Executing actions in parallel', continue_=True) as steps:

        # Everything the generator needs to expand the parallel block
        # into one dispatcher-kwargs dict per action
        kwargs = {
            'self': self,
            'steps': steps,
            'testbed': testbed,
            'section': section,
            'data': data,
            'parallel': True
        }

        # call generator and store all action kwargs into pcall_payload
        for action_kwargs in callback_blitz_dispatcher_gen(**kwargs):
            pcall_payloads.append(action_kwargs)

        # Fan the actions out to child processes and collect their returns
        pcall_returns = pcall(self.dispatcher, ikwargs=pcall_payloads)

        # Merge child results (saved vars, step results) into this process
        return _parallel(self, section, pcall_returns, steps)
def tftp_boot(section, steps, device, ip_address, subnet_mask, gateway,
              tftp_server, image, recovery_password=None,
              save_system_config=True, timeout=600, config_reg_timeout=30):
    """ This stage boots a new image onto your device using the tftp booting
    method.

    Stage Schema
    ------------
    tftp_boot:
        image (list): Image to boot with

        ip_address (list): Management ip address to configure to reach to the
            tftp server

        subnet_mask (str): Management subnet mask

        gateway (str): Management gateway

        tftp_server (str): Tftp server that is reachable with management
            interface

        recovery_password (str, optional): Enable password for device
            required after bootup. Defaults to None.

        save_system_config (bool, optional): Whether or not to save the
            system config if it was modified. Defaults to True.

        timeout (int, optional): Max time during which tftp boot must
            complete. Defaults to 600.

        config_reg_timeout (int, optional): Max time to set config-register.
            Defaults to 30.

    Example
    -------
    tftp_boot:
        image:
          - /auto/some-location/that-this/image/stay-isr-image.bin
        ip_address: [10.1.7.126, 10.1.7.127]
        gateway: 10.1.7.1
        subnet_mask: 255.255.255.0
        tftp_server: 11.1.7.251
        recovery_password: nbv_12345
        save_system_config: False
        timeout: 600
        config_reg_timeout: 10

    There is more than one ip address, one for each supervisor.
    """

    log.info("Section steps:"
             "\n1- Set config-register to 0x0"
             "\n2- Bring device down to rommon> prompt prior to TFTP boot"
             "\n3- Begin TFTP boot"
             "\n4- Reconnect to device after TFTP boot"
             "\n5- Reset config-register to 0x2101"
             "\n6- Execute 'write memory'")

    # Set config-register to 0x0
    with steps.start("Set config-register to 0x0 on {}".format(
            device.name)) as step:
        try:
            device.api.execute_set_config_register(config_register='0x0',
                                                   timeout=config_reg_timeout)
        except Exception as e:
            step.failed(
                "Unable to set config-register to 0x0 prior to TFTP"
                " boot on {}".format(device.name), )

    # Bring the device down to rommon> prompt prior to TFTP boot
    with steps.start("Bring device {} down to rommon> prompt prior to TFTP boot".\
                    format(device.name)) as step:

        # Automatically answer the interactive reload prompts
        reload_dialog = Dialog([
            Statement(
                pattern=
                r".*System configuration has been modified\. Save\? \[yes\/no\].*",
                action='sendline(yes)'
                if save_system_config else 'sendline(no)',
                loop_continue=True,
                continue_timer=False),
            Statement(pattern=r".*Proceed with reload\? \[confirm\].*",
                      action='sendline()',
                      loop_continue=False,
                      continue_timer=False),
        ])

        # Using sendline, as we dont want unicon boot to kick in and send "boot"
        # to the device. Cannot use device.reload() directly as in case of HA,
        # we need both sup to do the commands
        device.sendline('reload')
        reload_dialog.process(device.spawn)

        if device.is_ha:
            def reload_check(device, target):
                # Wait for the reload banner on the given connection
                device.expect(['.*Initializing Hardware.*'],
                              target=target,
                              timeout=60)

            # Watch both the active and standby consoles in parallel
            pcall(reload_check,
                  ckwargs={'device': device},
                  ikwargs=[{
                      'target': 'active'
                  }, {
                      'target': 'standby'
                  }])
        else:
            device.expect(['.*Initializing Hardware.*'], timeout=60)

        log.info("Device is reloading")
        # Drop all connections; the state machine is stale after a reload
        device.destroy_all()

    # Begin TFTP boot of device
    with steps.start("Begin TFTP boot of device {}".format(
            device.name)) as step:

        # Need to instantiate to get the device.start
        # The device.start only works because of a|b
        device.instantiate(connection_timeout=timeout)

        tftp_boot = {
            'ip_address': ip_address,
            'subnet_mask': subnet_mask,
            'gateway': gateway,
            'tftp_server': tftp_server,
            'image': image
        }
        try:
            abstract = Lookup.from_device(device, packages={'clean': clean})
            # Item is needed to be able to know in which parallel child
            # device.start only gets filled with single rp devices
            # for multiple rp devices we need to use subconnections
            if device.is_ha and hasattr(device, 'subconnections'):
                start = [i.start[0] for i in device.subconnections]
            else:
                start = device.start

            result = pcall(abstract.clean.recovery.recovery.recovery_worker,
                           start=start,
                           ikwargs = [{'item': i} for i, _ in enumerate(start)],
                           ckwargs = \
                                {'device': device,
                                 'timeout': timeout,
                                 'tftp_boot': tftp_boot,
                                 'break_count': 0,
                                 # Irrelevant as we will not use this pattern anyway
                                 # But needed for the recovery
                                 'console_activity_pattern': '\\.\\.\\.\\.',
                                 'golden_image': None,
                                 'recovery_password': recovery_password})
        except Exception as e:
            log.error(str(e))
            step.failed("Failed to TFTP boot the device '{}'".\
                        format(device.name), )
        else:
            log.info("Successfully performed TFTP boot on device '{}'".\
                     format(device.name))

    # Disconnect and reconnect to the device
    with steps.start("Reconnect to device {} after TFTP boot".\
                    format(device.name)) as step:
        if not _disconnect_reconnect(device):
            # If that still doesnt work, Thats all we got
            step.failed(
                "Cannot reconnect to the device {d} after TFTP boot".format(
                    d=device.name), )
        else:
            log.info("Success - Have recovered and reconnected to device '{}'".\
                     format(device.name))

    # Reset config-register to 0x2101
    # NOTE(review): the step title and failure message say 0x2101 but the
    # register value actually written is 0x2102 -- confirm which is intended.
    with steps.start("Reset config-register to 0x2101 on {}".\
                    format(device.name)) as step:
        try:
            device.api.execute_set_config_register(config_register='0x2102',
                                                   timeout=config_reg_timeout)
        except Exception as e:
            log.error(str(e))
            step.failed(
                "Unable to reset config-register to 0x2101 after TFTP"
                " boot on {}".format(device.name), )

    # Execute 'write memory'
    with steps.start("Execute 'write memory' on {}".format(
            device.name)) as step:
        try:
            device.api.execute_write_memory()
        except Exception as e:
            log.error(str(e))
            step.failed(
                "Unable to execute 'write memory' after TFTP boot "
                "on {}".format(device.name), )
        else:
            section.passed("Successfully performed TFTP boot on device {}".\
                           format(device.name))
def tftp_boot(section, steps, device, ip_address, subnet_mask, gateway,
              tftp_server, image, recovery_password=None,
              save_system_config=True, timeout=600, config_reg_timeout=30,
              device_reload_sleep=20):
    ''' Clean yaml file schema:
    -----------------------
    tftp_boot:
        image: <Image to boot with `str`> (Mandatory)
        ip_address: <Management ip address to configure to reach to the
                     TFTP server `str`> (Mandatory)
        subnet_mask: <Management subnet mask `str`> (Mandatory)
        gateway: <Management gateway `str`> (Mandatory)
        tftp_server: <tftp server is reachable with management interface
                      `str`> (Mandatory)
        recovery_password: <Enable password for device required after
                            bootup `str`> (Optional, Default None)
        save_system_config: <Whether or not to save the system config if
                             it was modified `bool`> (Optional) Default: True
        timeout: <Max time during which TFTP boot must complete `int`>
                 (Optional, Default 600 seconds)
        config_reg_timeout: <Max time to set config-register `int`>
                            (Optional, Default 30 seconds)
        device_reload_sleep: <Max time to wait after reloading device with
                              config-register 0x0 `int`>
                             (Optional, Default 20 seconds)

    Example:
    --------
    tftp_boot:
        image:
          - /auto/some-location/that-this/image/stay-isr-image.bin
        ip_address: [10.1.7.126, 10.1.7.127]
        gateway: 10.1.7.1
        subnet_mask: 255.255.255.0
        tftp_server: 11.1.7.251
        recovery_password: nbv_12345
        save_system_config: False
        timeout: 600
        config_reg_timeout: 10
        device_reload_sleep: 30

    There is more than one ip address, one for each supervisor.

    Flow:
    -----
    Before:
        Any
    After:
        Connect
    '''

    log.info("Section steps:"
             "\n1- Set config-register to 0x0"
             "\n2- Bring device down to rommon> prompt prior to TFTP boot"
             "\n3- Begin TFTP boot"
             "\n4- Reconnect to device after TFTP boot"
             "\n5- Reset config-register to 0x2101"
             "\n6- Execute 'write memory'")

    # Set config-register to 0x0
    with steps.start("Set config-register to 0x0 on {}".format(
            device.name)) as step:
        try:
            device.api.execute_set_config_register(config_register='0x0',
                                                   timeout=config_reg_timeout)
        except Exception as e:
            step.failed(
                "Unable to set config-register to 0x0 prior to TFTP"
                " boot on {}".format(device.name), )

    # Bring the device down to rommon> prompt prior to TFTP boot
    with steps.start("Bring device {} down to rommon> prompt prior to TFTP boot".\
                    format(device.name)) as step:

        # Using sendline, as we dont want unicon boot to kick in and send "boot"
        # to the device. Cannot use device.reload() directly as in case of HA,
        # we need both sup to do the commands
        device.sendline('reload')
        # Answer the "Save?" prompt according to save_system_config
        device.sendline('yes') if save_system_config else device.sendline('no')
        device.sendline()

        # We now want to overwrite the statemachine
        device.destroy_all()

        # Sleep to make sure the device is reloading
        time.sleep(device_reload_sleep)

    # Begin TFTP boot of device
    with steps.start("Begin TFTP boot of device {}".format(
            device.name)) as step:

        # Need to instantiate to get the device.start
        # The device.start only works because of a|b
        device.instantiate(connection_timeout=timeout)

        tftp_boot = {
            'ip_address': ip_address,
            'subnet_mask': subnet_mask,
            'gateway': gateway,
            'tftp_server': tftp_server,
            'image': image
        }
        try:
            abstract = Lookup.from_device(device, packages={'clean': clean})
            # Item is needed to be able to know in which parallel child
            # device.start only gets filled with single rp devices
            # for multiple rp devices we need to use subconnections
            if device.is_ha and hasattr(device, 'subconnections'):
                start = [i.start[0] for i in device.subconnections]
            else:
                start = device.start

            result = pcall(abstract.clean.stages.recovery.recovery_worker,
                           start=start,
                           ikwargs = [{'item': i} for i, _ in enumerate(start)],
                           ckwargs = \
                                {'device': device,
                                 'timeout': timeout,
                                 'tftp_boot': tftp_boot,
                                 'break_count': 0,
                                 # Irrelevant as we will not use this pattern anyway
                                 # But needed for the recovery
                                 'console_activity_pattern': '\\.\\.\\.\\.',
                                 'golden_image': None,
                                 'recovery_password': recovery_password})
        except Exception as e:
            log.error(str(e))
            step.failed("Failed to TFTP boot the device '{}'".\
                        format(device.name), )
        else:
            log.info("Successfully performed TFTP boot on device '{}'".\
                     format(device.name))

    # Disconnect and reconnect to the device
    with steps.start("Reconnect to device {} after TFTP boot".\
                    format(device.name)) as step:
        if not _disconnect_reconnect(device):
            # If that still doesnt work, Thats all we got
            step.failed(
                "Cannot reconnect to the device {d} after TFTP boot".format(
                    d=device.name), )
        else:
            log.info("Success - Have recovered and reconnected to device '{}'".\
                     format(device.name))

    # Reset config-register to 0x2101
    # NOTE(review): the step title and failure message say 0x2101 but the
    # register value actually written is 0x2102 -- confirm which is intended.
    with steps.start("Reset config-register to 0x2101 on {}".\
                    format(device.name)) as step:
        try:
            device.api.execute_set_config_register(config_register='0x2102',
                                                   timeout=config_reg_timeout)
        except Exception as e:
            log.error(str(e))
            step.failed(
                "Unable to reset config-register to 0x2101 after TFTP"
                " boot on {}".format(device.name), )

    # Execute 'write memory'
    with steps.start("Execute 'write memory' on {}".format(
            device.name)) as step:
        try:
            device.api.execute_write_memory()
        except Exception as e:
            log.error(str(e))
            step.failed(
                "Unable to execute 'write memory' after TFTP boot "
                "on {}".format(device.name), )
        else:
            section.passed("Successfully performed TFTP boot on device {}".\
                           format(device.name))
def tftp_boot(section, steps, device, ip_address, subnet_mask, gateway,
              tftp_server, image, timeout, reconnect_delay=60, reboot_delay=20):
    '''TFTP boot the device with the given image, then reconnect and set
    the boot variables to the newly running image.

    Clean yaml file schema:
    -----------------------
    tftp_boot:
        image: <Image to boot with `str`> (Mandatory)
        ip_address: <Management ip address to configure to reach to the
                     TFTP server `str`> (Mandatory)
        subnet_mask: <Management subnet mask `str`> (Mandatory)
        gateway: <Management gateway `str`> (Mandatory)
        tftp_server: <tftp server is reachable with management interface>
                     (Mandatory)
        timeout: <Maximum time for tftp boot `int`> (Mandatory)
        reboot_delay: <Time to sleep after sending 'reload' so the device
                      is actually going down before we reconnect `int`>
                      (Optional, Default: 20)
        reconnect_delay: <Once device recovered, delay before final
                         reconnect `int`> (Default: 60)

    Example:
    --------
    tftp_boot:
        image:
          - /auto/some-location/that-this/image/stay-isr-image.bin
        ip_address: [10.1.7.126, 10.1.7.127]
        gateway: 10.1.7.1
        subnet_mask: 255.255.255.0
        tftp_server: 11.1.7.251

    There is more than one ip address, one for each supervisor.

    Flow:
    -----
    Before:
        Any
    After:
        Connect
    '''
    # If the tftp boot has already ran - recovery
    # Then do not run it again and skip this section
    if section.parameters['common_data'].get('device_tftp_booted'):
        section.skipped('The global recovery has already booted the device with'
                        ' the provided tftp image - no need to do it again')

    device.api.execute_write_erase_boot()

    # Using sendline, as we dont want unicon boot to kick in and send "boot" to
    # the device
    # Cannot use .reload as in case of HA, we need both sup to do the commands
    device.sendline('reload')
    device.sendline('y')
    device.sendline()
    log.info('** Rebooting the device **')

    # We now want to overwrite the statemachine
    device.destroy_all()

    # Sleep to make sure the device is reloading
    time.sleep(reboot_delay)

    # Need to instantiate to get the device.start
    # The device.start only works because of a|b
    device.instantiate(connection_timeout=timeout)

    tftp_boot = {'ip_address': ip_address,
                 'subnet_mask': subnet_mask,
                 'gateway': gateway,
                 'tftp_server': tftp_server,
                 'image': image}
    try:
        abstract = Lookup.from_device(device, packages={'clean': clean})

        # Item is needed to be able to know in which parallel child we are.
        # device.start only gets filled for single-rp devices; for multi-rp
        # (HA) devices each supervisor's start command lives on the
        # subconnections. This mirrors the other recovery entry points so
        # both supervisors are recovered in parallel.
        if device.is_ha and hasattr(device, 'subconnections'):
            start = [i.start[0] for i in device.subconnections]
        else:
            start = device.start

        pcall(abstract.clean.stages.recovery.recovery_worker,
              start=start,
              ikwargs=[{'item': i} for i, _ in enumerate(start)],
              ckwargs={'device': device,
                       'timeout': timeout,
                       'tftp_boot': tftp_boot,
                       # Irrelevant as we will not use this pattern anyway
                       # But needed for the recovery
                       'break_count': 0,
                       'console_activity_pattern': '\\.\\.\\.\\.',
                       'golden_image': None,
                       'recovery_password': None})
    except Exception as e:
        log.error(str(e))
        section.failed("Failed to recover the device '{}'".format(device.name))
    else:
        log.info("Successfully recovered the device '{}'".format(device.name))

    log.info('Sleeping for {r} before reconnection'.format(r=reconnect_delay))
    time.sleep(reconnect_delay)

    # Disconnect and reconnect to the device
    if not _disconnect_reconnect(device):
        # If that still doesnt work, Thats all we got
        section.failed("Cannot reconnect to the device {d}".format(
            d=device.name))
    else:
        log.info("Success - Have recovered and reconnected to device '{}'".format(
            device.name))

    log.info('Set the boot variables')
    output = device.api.get_running_image()
    if not output:
        section.failed('Could not retrieve the running image')

    # Point the boot variable at the image we just booted and save the config
    image = output[0].rsplit('/', 1)[1]
    device.api.execute_change_boot_variable(system='bootflash:/{image}'
                                            .format(image=image))
    device.api.execute_copy_run_to_start()
def _connectivity(device, console_activity_pattern=None,
                  console_breakboot_char=None, grub_activity_pattern=None,
                  grub_breakboot_char=None, break_count=10, timeout=None,
                  golden_image=None, tftp_boot=None, recovery_password=None,
                  clear_line=True, powercycler=True, powercycler_delay=30,
                  section=None, reconnect_delay=60):
    '''Powercycle the device and start the recovery process

    Args:
        device ('obj'): Device object
        console_activity_pattern: <Break pattern on the device for normal boot mode, 'str'>
        console_breakboot_char: <Character to send when console_activity_pattern is matched, 'str'>
        grub_activity_pattern: <Break pattern on the device for grub boot mode, 'str'>
        grub_breakboot_char: <Character to send when grub_activity_pattern is matched, 'str'>
        break_count ('int'): Number of sending break times
        timeout ('int'): Recovery process timeout
        golden_image ('dict'): information to load golden image on the device
        tftp_boot ('dict'): Tftp boot information
        recovery_password ('str'): Device password after recovery
        powercycler: <Should powercycler execute, 'bool'> (Default: True)
        clear_line: <Should clearline execute, 'bool'> (Default: True)
        powercycler_delay: <Powercycler delay between on/off>, 'int'> (Default: 30)
        reconnect_delay: <Once device recovered, delay before final reconnect>, 'int'> (Default: 60)

    Returns:
        None

    Raises:
        Exception: if powercycling fails, if neither golden_image nor
            tftp_boot was provided, if the parallel recovery workers fail,
            or if the device cannot be reconnected to afterwards.
    '''
    # NOTE(review): numbering starts at Step-2 - Step-1 (an initial reconnect
    # attempt) presumably happens in the caller; confirm.

    # Step-2: Clear console port line
    if clear_line:
        log.info(banner("Clearing the console port line"))
        try:
            device.api.execute_clear_line()
        except Exception as e:
            # Best-effort: a stuck console line is only one possible cause,
            # so keep going to the powercycle step instead of raising
            log.warning(str(e))
            log.warning("Unable to clear console port line")
        else:
            log.info("Successfully cleared console port line on device '{}'".\
                     format(device.name))

        # Attempt disconnecting and reconnecting after clearing line
        if _disconnect_reconnect(device):
            # All good! Clearing the line was enough - no powercycle needed
            log.info("Successfully re-connected to device '{}'".\
                     format(device.name))
            return
        else:
            log.warning("Cannot re-connect to device '{}' after clearing "
                        "console port line".format(device.name))

    # Step-3: Powercycle device
    if powercycler:
        log.info(banner("Powercycling device '{}'".format(device.name)))
        try:
            device.api.execute_power_cycle_device(delay=powercycler_delay)
        except Exception as e:
            log.error(str(e))
            raise Exception("Failed to powercycle device '{}'".format(device.name))
        else:
            log.info("Successfully powercycled device '{}' during recovery".\
                     format(device.name))

    # Step-4: Boot device with given golden image
    # Only logs which recovery flavor will run; the actual boot is performed
    # by the recovery workers below, which receive both dicts
    if golden_image:
        log.info(banner("Booting device '{}' with the Golden images".\
                        format(device.name)))
        log.info("Golden image information found:\n{}".format(golden_image))
    elif tftp_boot:
        log.info(banner("Booting device '{}' with the Tftp images".\
                        format(device.name)))
        log.info("Tftp boot information found:\n{}".format(tftp_boot))
    else:
        # This case is for the simple boot
        # Not yet supported
        raise Exception('Global recovery only support golden image and tftp '
                        'boot recovery and neither was provided')

    # Need to instantiate to get the device.start
    # The device.start only works because of a|b
    device.instantiate(connection_timeout=timeout)

    # For each default connection, start a fork to try to recover the device
    try:
        abstract = Lookup.from_device(device, packages={'clean': clean})
        # Item is needed to be able to know in which parallel child
        # we are
        # For HA devices each supervisor has its own start command on the
        # subconnections; single-rp devices use device.start directly
        if device.is_ha and hasattr(device, 'subconnections'):
            start = [i.start[0] for i in device.subconnections]
        else:
            start = device.start
        pcall(
            abstract.clean.recovery.recovery.recovery_worker,
            start=start,
            ikwargs=[{'item': i} for i, _ in enumerate(start)],
            ckwargs={
                'device': device,
                'console_activity_pattern': console_activity_pattern,
                'console_breakboot_char': console_breakboot_char,
                'grub_activity_pattern': grub_activity_pattern,
                'grub_breakboot_char': grub_breakboot_char,
                'break_count': break_count,
                'timeout': timeout,
                'golden_image': golden_image,
                'tftp_boot': tftp_boot,
                'recovery_password': recovery_password}
        )
    except Exception as e:
        log.error(str(e))
        raise Exception("Failed to recover the device '{}'".\
                        format(device.name))
    else:
        log.info("Successfully recovered the device '{}'".\
                 format(device.name))

    # Give the freshly booted device time to settle before reconnecting
    log.info('Sleeping for {r} before reconnection'.format(r=reconnect_delay))
    time.sleep(reconnect_delay)

    # Step-5: Disconnect and reconnect to the device
    if not _disconnect_reconnect(device):
        # If that still doesnt work, Thats all we got
        raise Exception("Cannot recover the device '{d}'\nCannot run clean".\
                        format(d=device.name))
    else:
        log.info("Success - Have recovered and reconnected to device '{}'".\
                 format(device.name))
def tftp_boot(self, device, ip_address, subnet_mask, gateway, tftp_server,
              image, timeout, reconnect_delay=RECONNECT_DELAY,
              reboot_delay=REBOOT_DELAY):
    '''TFTP boot the device with the given image via the parallel recovery
    workers, then reconnect and point the boot variable at the new image.

    Args:
        device ('obj'): Device object
        ip_address ('list'): Management ip address(es), one per supervisor
        subnet_mask ('str'): Management subnet mask
        gateway ('str'): Management gateway
        tftp_server ('str'): TFTP server reachable from the management interface
        image ('list'): Image to boot with
        timeout ('int'): Maximum time for the tftp boot
        reconnect_delay ('int'): Delay before final reconnect once recovered
        reboot_delay ('int'): Time to sleep after issuing 'reload'

    Returns:
        None (marks the section failed via self.failed on error)
    '''
    # Erase startup config and boot config so the device comes up clean
    device.api.execute_write_erase_boot()
    # Using sendline, as we dont want unicon boot to kick in and send "boot" to
    # the device
    # Cannot use .reload as in case of HA, we need both sup to do the commands
    device.sendline('reload')
    device.sendline('y')
    device.sendline()
    log.info('** Rebooting the device **')
    # We now want to overwrite the statemachine
    device.destroy_all()
    # Sleep to make sure the device is reloading
    time.sleep(reboot_delay)

    # Need to instantiate to get the device.start
    # The device.start only works because of a|b
    device.instantiate(connection_timeout=timeout)
    tftp_boot = {'ip_address': ip_address,
                 'subnet_mask': subnet_mask,
                 'gateway': gateway,
                 'tftp_server': tftp_server,
                 'image': image}
    try:
        abstract = Lookup.from_device(device, packages={'clean': clean})
        # Item is needed to be able to know in which parallel child
        # we are
        # device.start only gets filled with single rp devices
        # for multiple rp devices we need to use subconnections
        if device.is_ha and hasattr(device, 'subconnections'):
            start = [i.start[0] for i in device.subconnections]
        else:
            start = device.start
        # One recovery worker per supervisor console, run in parallel
        result = pcall(abstract.clean.recovery.recovery.recovery_worker,
                       start=start,
                       ikwargs=[{'item': i} for i, _ in enumerate(start)],
                       ckwargs={'device': device,
                                'timeout': timeout,
                                'tftp_boot': tftp_boot,
                                # Irrelevant as we will not use this pattern anyway
                                # But needed for the recovery
                                'break_count': 0,
                                'console_activity_pattern': '\\.\\.\\.\\.',
                                'golden_image': None,
                                'recovery_password': None})
    except Exception as e:
        log.error(str(e))
        self.failed("Failed to recover the device '{}'".\
                    format(device.name))
    else:
        log.info("Successfully recovered the device '{}'".\
                 format(device.name))

    # Give the freshly booted device time to settle before reconnecting
    log.info('Sleeping for {r} before reconnection'.format(r=reconnect_delay))
    time.sleep(reconnect_delay)

    # Disconnect and reconnect to the device
    if not _disconnect_reconnect(device):
        # If that still doesnt work, Thats all we got
        self.failed("Cannot reconnect to the device {d}".
                    format(d=device.name))
    else:
        log.info("Success - Have recovered and reconnected to device '{}'".\
                 format(device.name))

    log.info('Set the boot variables')
    output = device.api.get_running_image()
    if not output:
        self.failed('Could not retrieved the running image')
    # Point the boot variable at the running image and save the config
    image = output[0].rsplit('/', 1)[1]
    device.api.execute_change_boot_variable(system='bootflash:/{image}'
                                            .format(image=image))
    device.api.execute_copy_run_to_start()
def _generate(self):
    """The _generate is called by the testbed creator - It starts here

    Takes testbed information and writes the topology information into
    a yaml file

    Returns:
        dict: The intermediate dictionary format of the testbed data.

    Raises:
        Exception: on invalid exclude-network, alias, or universal-login
            input, or when both universal login and credential prompt
            are requested.
    """
    # Optional debug log file for the discovery run
    if self._debug_log:
        log_file = self.create_debug_log()
    else:
        log_file = ''

    # Load testbed file
    testbed = load(self._testbed_file)

    # Re-open the testbed file as yaml so we can read the
    # connection password - so we can re-create the yaml
    # with these credential
    with open(self._testbed_file, 'r') as stream:
        try:
            testbed_yaml = safe_load(stream)
        except YAMLError as exc:
            raise exc('Error Loading Yaml file {}'.format(
                self._testbed_file))

    # Config discovery rewrites cdp/lldp config - get explicit user consent
    if self._config_discovery:
        reply = ''
        while reply != 'y':
            reply = input('Running creator with config-discovery will '
                          'reset cdp and lldp configuration to basic '
                          'configuration is this acceptable (y/n)')
            if reply == 'n':
                log.info('Cancelling creator operation')
                return
            if reply != 'n' and reply != 'y':
                log.info('Please respond with only y or n')

    # The two credential sources are mutually exclusive
    if self._cred_prompt and self._universal_login:
        raise Exception(
            'Do not use both universal login and credential prompt')

    # Standardizing exclude networks
    exclude_networks = []
    for network in self._exclude_networks.split():
        try:
            exclude_networks.append(ipaddress.ip_network(network))
        except Exception:
            raise Exception(
                'IP range given {ip} is not valid'.format(ip=network))

    # take aliases entered by user and format it into dictionary
    # expected format per entry: "<device>:<alias>"
    for alias_mapping in self._alias.split():
        spli = alias_mapping.split(':')
        if len(spli) != 2:
            raise Exception('{} is not valid entry'.format(alias_mapping))
        self.alias_dict[spli[0]] = spli[1]

    dev_man = testbed_manager.TestbedManager(
        testbed, config=self._config_discovery,
        ssh_only=self._ssh_only, alias_dict=self.alias_dict,
        timeout=self._timeout, supported_os=SUPPORTED_OS,
        logfile=log_file, disable_config=self._disable_config)

    # Get the credential for the device from the yaml - so can recreate the
    # yaml with those
    credential_dict, proxy_set = dev_man.get_credentials_and_proxies(
        testbed_yaml)

    # take universal login argument and parse it as new dict
    # format: "<username> <password>" - overrides per-device credentials
    if self._universal_login:
        cred = self._universal_login.split()
        if len(cred) != 2:
            raise Exception('{} is not valid format for login'.format(
                self._universal_login))
        credential_dict = {
            'default': {
                'username': cred[0],
                'password': cred[1]
            }
        }

    device_list = {}
    count = 1
    # Breadth-first discovery: each round connects to the devices found in
    # the previous round; the loop ends once every device in the (growing)
    # testbed has been visited, or after one round if only links are wanted
    while len(testbed.devices) > len(dev_man.visited_devices):
        # connect to unvisited devices
        log.info('Discovery Process Round {}'.format(count))
        log.info(' Connecting to devices')
        log.debug('--------DEBUG LOGS-------')
        connect, noconnect, skip = dev_man.connect_all_devices(
            len(testbed.devices))
        log.debug('--------CONSOLE LOGS--------')
        if connect:
            log.info(' Successfully connected to devices {}'.format(
                connect))
        if noconnect:
            log.info(
                ' Failed to connect to devices {}'.format(noconnect))
        if skip:
            log.info(' Skipped connecting to devices {}'.format(skip))

        # Configure these connected devices
        if dev_man.config:
            log.info(
                ' Configuring Testbed devices cdp and lldp protocol')
            log.debug('--------DEBUG LOGS-------')
            dev_man.configure_testbed_cdp_protocol()
            dev_man.configure_testbed_lldp_protocol()
            log.debug('--------CONSOLE LOGS--------')
            # brief settle time so the protocols come up before reading them
            time.sleep(5)
            if dev_man.cdp_configured:
                log.info(' cdp was configured for devices {}'.format(
                    dev_man.cdp_configured))
            else:
                log.info(' cdp was not configured on any device')
            if dev_man.lldp_configured:
                log.info(' lldp was configured for devices {}'.format(
                    dev_man.lldp_configured))
            else:
                log.info(' lldp was not configured on any device')

        # Get the cdp/lldp operation data and massage it into our structure format
        log.info(' Finding neighbors information')
        log.debug('--------DEBUG LOGS-------')
        result = dev_man.get_neigbor_data()
        connections = self.process_neighbor_data(testbed, device_list,
                                                 exclude_networks, result)
        log.debug('Connections found in current set of devices: {}'.format(
            connections))
        log.debug('--------DEBUG LOGS-------')
        device_ip_string = self.format_debug_string(device_list, dev_man)
        log.debug(device_ip_string)

        # Create new devices to add to testbed
        # This make testbed.devices grow, add these new devices
        new_devs = self._write_devices_into_testbed(
            device_list, proxy_set, credential_dict, testbed)
        log.debug('--------CONSOLE LOGS--------')
        if new_devs:
            log.info(
                ' Found these new devices {} - Restarting a new discovery process'
                .format(new_devs))

        # add the connections that were found to the topology
        self._write_connections_to_testbed(connections, testbed)
        log.info('')
        if self._only_links:
            break
        count += 1

    log.debug('--------DEBUG LOGS-------')
    # get IP address for interfaces
    log.debug('Get interface ip addresses')
    pcall(dev_man.get_interfaces_ipV4_address,
          device=testbed.devices.values())
    log.debug('--------CONSOLE LOGS--------')

    # unconfigure cdp and lldp on devices that were configured by script
    if self._config_discovery:
        log.info(
            'Unconfiguring cdp and lldp protocols on configured devices')
        log.debug('--------DEBUG LOGS-------')
        pcall(dev_man.unconfigure_neighbor_discovery_protocols,
              device=testbed.devices.values())
        log.debug('--------CONSOLE LOGS--------')
        if dev_man.cdp_configured:
            log.info(' CDP was unconfigured on {}'.format(
                dev_man.cdp_configured))
        if dev_man.lldp_configured:
            log.info(' LLDP was unconfigured on {}'.format(
                dev_man.lldp_configured))

    # add the new information into testbed_yaml
    final_yaml = self.create_yaml_dict(testbed, testbed_yaml,
                                       credential_dict)
    log.info('')
    if log_file:
        log.info('Debug log generated: {}'.format(log_file))

    # return final topology
    return final_yaml
def tftp_boot(section, steps, device, ip_address, subnet_mask, gateway,
              tftp_server, image, timeout=600, config_reg_timeout=30,
              device_reload_sleep=20, recovery_username=None,
              recovery_password=None):
    ''' Clean yaml file schema:
    -----------------------
    tftp_boot:
        image: <Image to boot with `str`> (Mandatory)
        ip_address: <Management ip address to configure to reach to the
                     TFTP server `str`> (Mandatory)
        subnet_mask: <Management subnet mask `str`> (Mandatory)
        gateway: <Management gateway `str`> (Mandatory)
        tftp_server: <tftp server is reachable with management interface
                      `str`> (Mandatory)
        timeout: <Maximum time during which TFTP boot process must
                  complete `int`> (Optional, Default 600 seconds)
        config_reg_timeout: <Time to wait after setting config-register
                            `int`> (Optional, Default 30 seconds)
        device_reload_sleep: <Time to wait after reloading device `int`>
                             (Optional, Default 20 seconds)
        recovery_username: <Enable username for device required after
                            bootup `str`> (Optional, Default None)
        recovery_password: <Enable password for device required after
                            bootup `str`> (Optional, Default None)

    Example:
    --------
    tftp_boot:
        image:
          - /auto/some-location/that-this/image/asr9k-mini-px.vm
        ip_address: [10.1.7.126, 10.1.7.127]
        gateway: 10.1.7.1
        subnet_mask: 255.255.255.0
        tftp_server: 11.1.7.251
        timeout: 1200
        config_reg_timeout: 60
        device_reload_sleep: 300
        recovery_username: admin
        recovery_password: nbv_12345

    Note: There is more than one ip address, one for each supervisor.

    Flow:
    -----
    Before:
        Any
    After:
        Connect
    '''
    log.info(
        "Section steps:\n1- Verify global recovery has not recovered device"
        "\n2- Set config-register to 0x1820"
        "\n3- Bring device down to rommon> prompt prior to TFTP boot"
        "\n4- Begin TFTP boot"
        "\n5- Reconnect to device after TFTP boot"
        "\n6- Reset config-register to 0x1922")

    # If the tftp boot has already ran - recovery
    # Then do not run it again and skip this section
    if section.parameters['common_data'].get('device_tftp_booted'):
        section.skipped(
            'The device recovery has already booted the device with'
            ' the provided tftp image - no need to do it again')

    # NOTE(review): the steps below fail the whole section
    # (section.failed + goto=['exit']) rather than the individual step -
    # presumably intentional so any failure aborts the clean; confirm.

    # Set config-register to 0x1820
    with steps.start("Set config-register to 0x1820 on {}".\
                     format(device.name)) as step:
        try:
            device.api.execute_set_config_register(
                config_register='0x1820', timeout=config_reg_timeout)
        except Exception as e:
            section.failed(
                "Unable to set config-register to 0x1820 prior to TFTP"
                " boot on {}".format(device.name), goto=['exit'])

    # Bring the device down to rommon > prompt prior to TFTP boot
    with steps.start("Bring device {} down to rommon > prompt prior to TFTP boot".\
                     format(device.name)) as step:
        # Reload device
        # The reload is expected to drop the console session, so the
        # except branch is the *success* path here: the connection dying
        # means the device is actually going down. A clean return from the
        # command means the reload did not take effect.
        try:
            device.admin_execute("reload location all")
        except Exception as e:
            # We now want to overwrite the statemachine
            device.destroy_all()
            # Sleep to make sure the device is reloading
            time.sleep(device_reload_sleep)
        else:
            section.failed("Unable to bring the device down to rommon> prompt",
                           goto=['exit'])

    # Begin TFTP boot of device
    with steps.start("Begin TFTP boot of device {}".format(
            device.name)) as step:

        # Need to instantiate to get the device.start
        # The device.start only works because of a|b
        device.instantiate(connection_timeout=timeout)

        tftp_boot = {
            'ip_address': ip_address,
            'subnet_mask': subnet_mask,
            'gateway': gateway,
            'tftp_server': tftp_server,
            'image': image
        }
        try:
            abstract = Lookup.from_device(device, packages={'clean': clean})
            # Item is needed to be able to know in which parallel child
            # we are
            result = pcall(abstract.clean.stages.recovery.recovery_worker,
                           start=device.start,
                           ikwargs=[{'item': i}
                                    for i, _ in enumerate(device.start)],
                           ckwargs={'device': device,
                                    'timeout': timeout,
                                    'tftp_boot': tftp_boot,
                                    'break_count': 0,
                                    # Irrelevant as we will not use this pattern anyway
                                    # But needed for the recovery
                                    'console_activity_pattern': '\\.\\.\\.\\.',
                                    'golden_image': None,
                                    'recovery_username': recovery_username,
                                    'recovery_password': recovery_password})
        except Exception as e:
            log.error(str(e))
            section.failed("Failed to TFTP boot the device '{}'".\
                           format(device.name), goto=['exit'])
        else:
            log.info("Successfully performed TFTP boot on device '{}'".\
                     format(device.name))

    # Disconnect and reconnect to the device
    with steps.start("Reconnect to device {} after TFTP boot".\
                     format(device.name)) as step:
        if not _disconnect_reconnect(device):
            # If that still doesnt work, Thats all we got
            section.failed(
                "Cannot reconnect to the device {d} after TFTP boot".format(
                    d=device.name), goto=['exit'])
        else:
            log.info("Success - Have recovered and reconnected to device '{}'".\
                     format(device.name))

    # Reset config-register to 0x1922
    with steps.start("Reset config-register to 0x1922 on {}".\
                     format(device.name)) as step:
        try:
            device.api.execute_set_config_register(
                config_register='0x1922', timeout=config_reg_timeout)
        except Exception as e:
            log.error(str(e))
            section.failed(
                "Unable to reset config-register to 0x1922 after TFTP"
                " boot on {}".format(device.name), goto=['exit'])
def learn_the_system_from_conf_ops(self, testbed, steps, features=None):
    """Learn and store the system properties

    Args:
        testbed (`obj`): Testbed object
        steps (`obj`): aetest steps object
        features (`dict`): dict of feature and attributes which want to learn.
            ex. {'conf.pim.Pim': [
                    'pim[vrf_attr][(.*)][address_family_attr][ipv4][send_rp_announce_intf]',
                    'pim[vrf_attr][(.*)][address_family_attr][ipv4][send_rp_announce_group_list]'],
                 'conf.bgp.Bgp': ['bgp[instance][(.*)][vrf_attr][(.*)][confederation_peers_as]']}
            When None (default), nothing is learned.

    Returns:
        None (learned data is stored in self.parent.parameters under 'lts')

    Raises:
        pyATS Results
    """

    def remove_parent_from_conf_dict(conf_dict):
        # Recursively strip 'parent' back-references so the learned conf
        # structure is a plain, acyclic dict
        temp_dict = deepcopy(conf_dict)
        for key, val in temp_dict.items():
            if key == 'parent':
                conf_dict.pop('parent')
            if isinstance(val, dict):
                remove_parent_from_conf_dict(conf_dict[key])

    def store_structure(device, feature):
        # Learn one {feature: attributes} pair on one device and return it
        # as {'lts': {feature: {device.name: learned_data}}}

        # get feature and attributes [(ft, attr)]
        [(ft, attr)] = feature.items()
        log.info(banner("Learning '{n}' feature with "
                        "attributes {a} on device {d}".format(
                            n=ft, a=attr, d=device)))
        # perform lookup per device
        lib = Lookup.from_device(device)
        # attach ops and conf
        lib.conf = getattr(lib, 'conf', conf)
        lib.ops = getattr(lib, 'ops', ops)
        # create the ops/conf instance
        try:
            obj = attrgetter(ft)(lib)
        except Exception:
            raise AttributeError('Cannot load %s for '
                                 'device %s.' % (ft, device.name))

        # conf learn_config
        if issubclass(obj, ConfBase):
            ret = obj.learn_config(device=device, attributes=attr)
            ret = _to_dict(ret[0])
            # delete the non-used objects for pcall to return
            ret.pop('__testbed__')
            ret.pop('devices')
            ret.pop('interfaces')
            remove_parent_from_conf_dict(ret['device_attr'][device.name])
        elif issubclass(obj, OpsBase):
            ret = obj(device, attributes=attr)
            ret.learn()
            # keep only the learned 'info' view; the full ops object is
            # not needed (and not friendly to pcall's return path)
            temp = AttrDict()
            temp.info = getattr(ret, 'info', {})
            ret = temp

        ret_dict = {}
        ret_dict.setdefault('lts', {}).\
            setdefault(ft, {}).setdefault(device.name, ret)
        # return the dictionary
        return ret_dict

    # Guard: iterating the default None would raise TypeError
    if features is None:
        features = {}

    # Only learn on devices that are currently connected
    devices = [testbed.devices[name] for name in testbed.devices
               if testbed.devices[name].is_connected()]

    # Learn each feature on every connected device in parallel and merge
    # the per-device results into one dictionary
    merged_dict = {}
    for ft in features:
        worker_devs = list(devices)
        worker_features = [{ft: features[ft]} for _ in devices]
        # pcall for each feature
        ret = pcall(store_structure,
                    device=worker_devs,
                    feature=worker_features)
        for item in ret:
            merge_dict(merged_dict, item)

    self.parent.parameters.update(merged_dict)

    # print out what we learned in LTS
    log.info('LTS information is \n{d}'.format(d=dumps(merged_dict,
                                                       indent=5)))