def get(self):
    """ Get settings """
    query = request.args
    hostname = None
    device_type = None
    model = None
    if 'hostname' in query:
        # Validate hostname and look up the device so settings can be
        # resolved with its device_type and model.
        if not Device.valid_hostname(query['hostname']):
            return empty_result('error', "Invalid hostname specified"), 400
        hostname = query['hostname']
        with sqla_session() as session:
            dev: Device = session.query(Device).filter(
                Device.hostname == hostname).one_or_none()
            if not dev:
                return empty_result('error', "Hostname not found in database"), 400
            device_type = dev.device_type
            model = dev.model
    if 'device_type' in query:
        # Explicit device_type argument overrides the type found via hostname
        devtype_name = query['device_type'].upper()
        if not DeviceType.has_name(devtype_name):
            return empty_result('error', "Invalid device type specified"), 400
        device_type = DeviceType[devtype_name]
    try:
        settings, settings_origin = get_settings(hostname, device_type, model)
    except Exception as e:
        return empty_result('error', "Error getting settings: {}".format(str(e))), 400
    return empty_result(data={'settings': settings,
                              'settings_origin': settings_origin})
def push_base_management_access(task, device_variables, job_id):
    """Nornir task: render and push the base management config for an
    ACCESS device.

    The device is expected to change management IP when the new config is
    committed, so losing connectivity during the push is treated as the
    normal outcome.

    Args:
        task: nornir task object, supplied by nornir .run()
        device_variables: dict of device-specific template variables
        job_id: job id used to tag log output for this thread
    Raises:
        RepoStructureException: if the platform mapping file is missing
        InitError: if the device is still reachable on its old address
            after the push, i.e. it never committed the new config
    """
    set_thread_data(job_id)
    logger = get_logger()
    logger.debug("Push basetemplate for host: {}".format(task.host.name))

    # Resolve local path of the templates repository from daemon config
    with open('/etc/cnaas-nms/repository.yml', 'r') as db_file:
        repo_config = yaml.safe_load(db_file)
        local_repo_path = repo_config['templates_local']

    # Per-platform mapping.yml points at the entrypoint template for ACCESS
    mapfile = os.path.join(local_repo_path, task.host.platform, 'mapping.yml')
    if not os.path.isfile(mapfile):
        raise RepoStructureException(
            "File {} not found in template repo".format(mapfile))
    with open(mapfile, 'r') as f:
        mapping = yaml.safe_load(f)
        template = mapping['ACCESS']['entrypoint']

    settings, settings_origin = get_settings(task.host.name, DeviceType.ACCESS)

    # Add all environment variables starting with TEMPLATE_SECRET_ to
    # the list of configuration variables. The idea is to store secret
    # configuration outside of the templates repository.
    template_secrets = {}
    for env in os.environ:
        if env.startswith('TEMPLATE_SECRET_'):
            template_secrets[env] = os.environ[env]
    # Merge dicts, this will overwrite interface list from settings
    template_vars = {**settings, **device_variables, **template_secrets}

    r = task.run(task=text.template_file,
                 name="Generate initial device config",
                 template=template,
                 path=f"{local_repo_path}/{task.host.platform}",
                 **template_vars)

    #TODO: Handle template not found, variables not defined

    task.host["config"] = r.result
    # Use extra low timeout for this since we expect to loose connectivity after changing IP
    task.host.connection_options["napalm"] = ConnectionOptions(
        extras={"timeout": 30})

    try:
        task.run(task=networking.napalm_configure,
                 name="Push base management config",
                 replace=True,
                 configuration=task.host["config"],
                 dry_run=False)
    except Exception:
        # The push raising is expected: committing the config changes the
        # management IP so the connection drops. Probe the old address; if
        # facts can still be fetched the device never committed the change.
        task.run(task=networking.napalm_get, getters=["facts"])
        if not task.results[-1].failed:
            raise InitError(
                "Device {} did not commit new base management config".format(
                    task.host.name))
def find_free_infra_ip(session) -> Optional[IPv4Address]:
    """Returns first free IPv4 infra IP.

    Args:
        session: SQLAlchemy session used to query existing devices
    Returns:
        The first /32 address in the configured infra loopback network
        that is not already assigned to a device, or None if exhausted.
    """
    # Use a set for O(1) membership tests while scanning candidates below
    used_ips = set()
    device_query = session.query(Device). \
        filter(Device.infra_ip != None).options(load_only("infra_ip"))
    for device in device_query:
        used_ips.add(device.infra_ip)
    settings, settings_origin = get_settings(device_type=DeviceType.CORE)
    infra_ip_net = IPv4Network(settings['underlay']['infra_lo_net'])
    # Walk candidate /32s in order and return the first unused one
    for net in infra_ip_net.subnets(new_prefix=32):
        ipaddr = IPv4Address(net.network_address)
        if ipaddr not in used_ips:
            return ipaddr
    return None
def find_free_infra_linknet(session) -> Optional[IPv4Network]:
    """Returns first free IPv4 infra linknet (/31).

    Args:
        session: SQLAlchemy session used to query existing linknets
    Returns:
        The first /31 subnet of the configured infra link network that is
        not already used by a linknet, or None if the pool is exhausted.
    """
    # Use a set for O(1) membership tests while scanning candidates below
    used_linknets = set()
    linknet_query = session.query(Linknet). \
        filter(Linknet.device_a_ip != None)
    ln: Linknet
    for ln in linknet_query:
        used_linknets.add(IPv4Interface(ln.ipv4_network).network)
    settings, settings_origin = get_settings(device_type=DeviceType.CORE)
    infra_ip_net = IPv4Network(settings['underlay']['infra_link_net'])
    # Walk candidate /31s in order and return the first unused one
    for net in infra_ip_net.subnets(new_prefix=31):
        if net not in used_linknets:
            return net
    return None
def push_base_management_access(task, device_variables):
    """Render the base management config for an ACCESS device and push it
    via NAPALM config-replace. A very short timeout is used because the
    device is expected to change its management IP when the config applies.
    """
    logger.debug("Push basetemplate for host: {}".format(task.host.name))

    # Resolve the local templates repository path from the daemon config
    with open('/etc/cnaas-nms/repository.yml', 'r') as repo_file:
        repo_conf = yaml.safe_load(repo_file)
        templates_path = repo_conf['templates_local']

    # The per-platform mapping.yml names the ACCESS entrypoint template
    mapfile = os.path.join(templates_path, task.host.platform, 'mapping.yml')
    if not os.path.isfile(mapfile):
        raise RepoStructureException(
            "File {} not found in template repo".format(mapfile))
    with open(mapfile, 'r') as mapping_file:
        platform_mapping = yaml.safe_load(mapping_file)
        entrypoint_template = platform_mapping['ACCESS']['entrypoint']

    settings, settings_origin = get_settings(task.host.name, DeviceType.ACCESS)
    # Merge dicts
    template_vars = {**device_variables, **settings}

    render_result = task.run(task=text.template_file,
                             name="Generate initial device config",
                             template=entrypoint_template,
                             path=f"{templates_path}/{task.host.platform}",
                             **template_vars)
    #TODO: Handle template not found, variables not defined
    task.host["config"] = render_result.result

    # Short timeout: connectivity is expected to drop when the IP changes
    task.host.connection_options["napalm"] = ConnectionOptions(
        extras={"timeout": 5})
    task.run(task=networking.napalm_configure,
             name="Push base management config",
             replace=True,
             configuration=task.host["config"],
             dry_run=False  # TODO: temp for testing
             )
def put(self, hostname):
    """Take a map of interfaces and associated values to update.
    Example: {"interfaces": {"Ethernet1": {"configtype": "ACCESS_AUTO"}}}

    Validates each interface entry against the device's settings (VXLANs,
    VLAN names) and updates the Interface rows. Returns 400 with the list
    of validation errors if any entry was invalid, otherwise a success
    result with the applied updates. Marks the device unsynchronized when
    anything changed.
    """
    json_data = request.get_json()
    data = {}      # per-interface summary of accepted updates
    errors = []    # accumulated validation errors; processing continues past them
    device_settings = None  # lazily loaded only if some entry carries 'data'

    with sqla_session() as session:
        dev: Device = session.query(Device).filter(Device.hostname == hostname).one_or_none()
        if not dev:
            return empty_result('error', "Device not found"), 404

        updated = False
        if 'interfaces' in json_data and isinstance(json_data['interfaces'], dict):
            for if_name, if_dict in json_data['interfaces'].items():
                if not isinstance(if_dict, dict):
                    errors.append("Each interface must have a dict with data to update")
                    continue
                intf: Interface = session.query(Interface).filter(Interface.device == dev).\
                    filter(Interface.name == if_name).one_or_none()
                if not intf:
                    errors.append(f"Interface {if_name} not found")
                    continue

                # Work on a copy of the existing data dict so we can detect
                # whether anything actually changed before writing back
                if intf.data and isinstance(intf.data, dict):
                    intfdata_original = dict(intf.data)
                    intfdata = dict(intf.data)
                else:
                    intfdata_original = {}
                    intfdata = {}

                if 'configtype' in if_dict:
                    configtype = if_dict['configtype'].upper()
                    if InterfaceConfigType.has_name(configtype):
                        # Only mark updated if the configtype really changed
                        if intf.configtype != InterfaceConfigType[configtype]:
                            intf.configtype = InterfaceConfigType[configtype]
                            updated = True
                            data[if_name] = {'configtype': configtype}
                    else:
                        errors.append(f"Invalid configtype received: {configtype}")

                if 'data' in if_dict:
                    # TODO: maybe this validation should be done via
                    # pydantic if it gets more complex
                    if not device_settings:
                        device_settings, _ = get_settings(hostname, dev.device_type)
                    if 'vxlan' in if_dict['data']:
                        # VXLAN must exist in the device's settings
                        if if_dict['data']['vxlan'] in device_settings['vxlans']:
                            intfdata['vxlan'] = if_dict['data']['vxlan']
                        else:
                            errors.append("Specified VXLAN {} is not present in {}".format(
                                if_dict['data']['vxlan'], hostname
                            ))
                    if 'untagged_vlan' in if_dict['data']:
                        # VLAN name must resolve to a VLAN id via the vxlans settings
                        vlan_id = resolve_vlanid(if_dict['data']['untagged_vlan'],
                                                 device_settings['vxlans'])
                        if vlan_id:
                            intfdata['untagged_vlan'] = if_dict['data']['untagged_vlan']
                        else:
                            errors.append("Specified VLAN name {} is not present in {}".format(
                                if_dict['data']['untagged_vlan'], hostname
                            ))
                    if 'tagged_vlan_list' in if_dict['data']:
                        if isinstance(if_dict['data']['tagged_vlan_list'], list):
                            # Every name in the list must resolve to a VLAN id
                            vlan_id_list = resolve_vlanid_list(if_dict['data']['tagged_vlan_list'],
                                                              device_settings['vxlans'])
                            if len(vlan_id_list) == len(if_dict['data']['tagged_vlan_list']):
                                intfdata['tagged_vlan_list'] = if_dict['data']['tagged_vlan_list']
                            else:
                                errors.append("Some VLAN names {} are not present in {}".format(
                                    ", ".join(if_dict['data']['tagged_vlan_list']), hostname
                                ))
                        else:
                            errors.append("tagged_vlan_list should be of type list, found {}".format(
                                type(if_dict['data']['tagged_vlan_list'])
                            ))
                    if 'neighbor' in if_dict['data']:
                        if isinstance(if_dict['data']['neighbor'], str) and \
                                Device.valid_hostname(if_dict['data']['neighbor']):
                            intfdata['neighbor'] = if_dict['data']['neighbor']
                        else:
                            errors.append("Neighbor must be valid hostname, got: {}".format(
                                if_dict['data']['neighbor']))
                    if 'description' in if_dict['data']:
                        if isinstance(if_dict['data']['description'], str) and \
                                len(if_dict['data']['description']) <= 64:
                            # Empty string or None both clear the description
                            if if_dict['data']['description']:
                                intfdata['description'] = if_dict['data']['description']
                            elif 'description' in intfdata:
                                del intfdata['description']
                        elif if_dict['data']['description'] is None:
                            if 'description' in intfdata:
                                del intfdata['description']
                        else:
                            errors.append(
                                "Description must be a string of 0-64 characters for: {}".
                                format(if_dict['data']['description']))
                    if 'enabled' in if_dict['data']:
                        # Strict bool check: isinstance would also accept ints
                        if type(if_dict['data']['enabled']) == bool:
                            intfdata['enabled'] = if_dict['data']['enabled']
                        else:
                            errors.append(
                                "Enabled must be a bool, true or false, got: {}".
                                format(if_dict['data']['enabled']))

                # Persist only if the data dict actually changed
                if intfdata != intfdata_original:
                    intf.data = intfdata
                    updated = True
                    if if_name in data:
                        data[if_name]['data'] = intfdata
                    else:
                        data[if_name] = {'data': intfdata}

        # Any accepted change means the device config is now out of sync
        if updated:
            dev.synchronized = False

    if errors:
        if data:
            ret = {'errors': errors, 'updated': data}
        else:
            ret = {'errors': errors}
        return empty_result(status='error', data=ret), 400
    else:
        return empty_result(status='success', data={'updated': data})
def update_linknets(session, hostname: str, devtype: DeviceType,
                    ztp_hostname: Optional[str] = None,
                    dry_run: bool = False) -> List[dict]:
    """Update linknet data for specified device using LLDP neighbor data.

    Args:
        session: SQLAlchemy session
        hostname: Hostname of the local device to update linknets for
        devtype: Device type of the local device
        ztp_hostname: Optional hostname to use when resolving settings
            (used during ZTP when the final hostname differs)
        dry_run: If True, don't persist any changes to the database
    Returns:
        List of dicts describing the new/updated linknets.
    """
    logger = get_logger()
    result = get_neighbors(hostname=hostname)[hostname][0]
    if result.failed:
        raise Exception("Could not get LLDP neighbors for {}".format(hostname))
    neighbors = result.result['lldp_neighbors']
    if ztp_hostname:
        settings_hostname = ztp_hostname
    else:
        settings_hostname = hostname

    ret = []

    local_device_inst: Device = session.query(Device).filter(
        Device.hostname == hostname).one()
    logger.debug("Updating linknets for device {} of type {}...".format(
        local_device_inst.id, devtype.name))

    for local_if, data in neighbors.items():
        logger.debug(
            f"Local: {local_if}, remote: {data[0]['hostname']} {data[0]['port']}")
        remote_device_inst: Device = session.query(Device).\
            filter(Device.hostname == data[0]['hostname']).one_or_none()
        if not remote_device_inst:
            logger.debug(
                f"Unknown neighbor device, ignoring: {data[0]['hostname']}")
            continue
        if remote_device_inst.state in [DeviceState.DISCOVERED, DeviceState.INIT]:
            # In case of MLAG init the peer does not have the correct devtype set yet,
            # use same devtype as local device instead
            remote_devtype = devtype
        elif remote_device_inst.state not in [DeviceState.MANAGED, DeviceState.UNMANAGED]:
            logger.debug(
                "Neighbor device has invalid state, ignoring: {}".format(
                    data[0]['hostname']))
            continue
        else:
            remote_devtype = remote_device_inst.device_type

        logger.debug(f"Remote device found, device id: {remote_device_inst.id}")

        local_device_settings, _ = get_settings(settings_hostname,
                                                devtype,
                                                local_device_inst.model)
        remote_device_settings, _ = get_settings(remote_device_inst.hostname,
                                                 remote_devtype,
                                                 remote_device_inst.model)

        verify_peer_iftype(hostname, devtype,
                           local_device_settings, local_if,
                           remote_device_inst.hostname,
                           remote_device_inst.device_type,
                           remote_device_settings, data[0]['port'])

        # Check if linknet object already exists in database
        local_devid = local_device_inst.id
        check_linknet = session.query(Linknet).\
            filter(
                ((Linknet.device_a_id == local_devid) & (Linknet.device_a_port == local_if))
                |
                ((Linknet.device_b_id == local_devid) & (Linknet.device_b_port == local_if))
                |
                ((Linknet.device_a_id == remote_device_inst.id) &
                 (Linknet.device_a_port == data[0]['port']))
                |
                ((Linknet.device_b_id == remote_device_inst.id) &
                 (Linknet.device_b_port == data[0]['port']))
            ).one_or_none()
        if check_linknet:
            logger.debug(f"Found existing linknet id: {check_linknet.id}")
            # Accept the existing linknet in either orientation.
            # BUGFIX: the second disjunct previously repeated the first
            # (A=local/B=remote) instead of checking the reversed
            # orientation, causing reversed linknets to be deleted and
            # re-created on every run.
            if ((check_linknet.device_a_id == local_devid
                    and check_linknet.device_a_port == local_if
                    and check_linknet.device_b_id == remote_device_inst.id
                    and check_linknet.device_b_port == data[0]['port'])
                    or
                    (check_linknet.device_a_id == remote_device_inst.id
                     and check_linknet.device_a_port == data[0]['port']
                     and check_linknet.device_b_id == local_devid
                     and check_linknet.device_b_port == local_if)):
                # All info is the same, no update required
                continue
            else:
                # TODO: update instead of delete+new insert?
                if not dry_run:
                    session.delete(check_linknet)
                    session.commit()

        # Only CORE/DIST fabric links get an infra linknet /31 assigned
        if devtype in [DeviceType.CORE, DeviceType.DIST] and \
                remote_device_inst.device_type in [DeviceType.CORE, DeviceType.DIST]:
            ipv4_network = find_free_infra_linknet(session)
        else:
            ipv4_network = None
        new_link = Linknet.create_linknet(
            session,
            hostname_a=local_device_inst.hostname,
            interface_a=local_if,
            hostname_b=remote_device_inst.hostname,
            interface_b=data[0]['port'],
            ipv4_network=ipv4_network,
            strict_check=not dry_run  # Don't do strict check if this is a dry_run
        )
        if not dry_run:
            local_device_inst.synchronized = False
            remote_device_inst.synchronized = False
            session.add(new_link)
            session.commit()
        else:
            # Make sure linknet object is not added to session because of foreign key load
            session.expunge(new_link)

        # Make return data pretty
        ret_dict = {
            'device_a_hostname': local_device_inst.hostname,
            'device_b_hostname': remote_device_inst.hostname,
            **new_link.as_dict()
        }
        del ret_dict['id']
        del ret_dict['device_a_id']
        del ret_dict['device_b_id']
        ret.append({k: ret_dict[k] for k in sorted(ret_dict)})
    return ret
def populate_device_vars(session, dev: Device,
                         ztp_hostname: Optional[str] = None,
                         ztp_devtype: Optional[DeviceType] = None):
    """Build the full template variable dict for a device.

    Combines settings from the settings repository with device-specific
    data from the database (management/infra IPs, interfaces, BGP peers,
    management domains) and TEMPLATE_SECRET_* environment variables.

    Args:
        session: SQLAlchemy session
        dev: Device to populate variables for
        ztp_hostname: Optional hostname override used during ZTP
        ztp_devtype: Optional device type override used during ZTP
    Returns:
        dict of template variables, device variables overriding settings.
    Raises:
        Exception: if device type is UNKNOWN, management IP/domain missing
        ValueError: if platform is not a string
    """
    logger = get_logger()
    device_variables = {
        'device_model': dev.model,
        'device_os_version': dev.os_version
    }

    if ztp_hostname:
        hostname: str = ztp_hostname
    else:
        hostname: str = dev.hostname

    if ztp_devtype:
        devtype: DeviceType = ztp_devtype
    elif dev.device_type != DeviceType.UNKNOWN:
        devtype: DeviceType = dev.device_type
    else:
        raise Exception("Can't populate device vars for device type UNKNOWN")

    mgmt_ip = dev.management_ip
    if not ztp_hostname:
        if not mgmt_ip:
            raise Exception("Could not find management IP for device {}".format(hostname))
        else:
            device_variables['mgmt_ip'] = str(mgmt_ip)

    if isinstance(dev.platform, str):
        platform: str = dev.platform
    else:
        raise ValueError("Unknown platform: {}".format(dev.platform))

    settings, settings_origin = get_settings(hostname, devtype, dev.model)

    if devtype == DeviceType.ACCESS:
        if ztp_hostname:
            # During ZTP there is no management domain assigned yet
            access_device_variables = {
                'interfaces': []
            }
        else:
            mgmtdomain = cnaas_nms.db.helper.find_mgmtdomain_by_ip(session, dev.management_ip)
            if not mgmtdomain:
                raise Exception(
                    "Could not find appropriate management domain for management_ip: {}".
                    format(dev.management_ip))
            mgmt_gw_ipif = IPv4Interface(mgmtdomain.ipv4_gw)
            access_device_variables = {
                'mgmt_vlan_id': mgmtdomain.vlan,
                'mgmt_gw': str(mgmt_gw_ipif.ip),
                'mgmt_ipif': str(IPv4Interface('{}/{}'.format(
                    mgmt_ip, mgmt_gw_ipif.network.prefixlen))),
                'mgmt_ip': str(mgmt_ip),
                'mgmt_prefixlen': int(mgmt_gw_ipif.network.prefixlen),
                'interfaces': []
            }

        # Build the per-interface variable list from database Interface rows
        intfs = session.query(Interface).filter(Interface.device == dev).all()
        intf: Interface
        for intf in intfs:
            untagged_vlan = None
            tagged_vlan_list = []
            intfdata = None
            try:
                ifindexnum: int = Interface.interface_index_num(intf.name)
            except ValueError:
                # Interface name has no parseable index; use 0 as fallback
                ifindexnum: int = 0
            if intf.data:
                if 'untagged_vlan' in intf.data:
                    untagged_vlan = resolve_vlanid(intf.data['untagged_vlan'],
                                                   settings['vxlans'])
                if 'tagged_vlan_list' in intf.data:
                    tagged_vlan_list = resolve_vlanid_list(intf.data['tagged_vlan_list'],
                                                           settings['vxlans'])
                intfdata = dict(intf.data)
            access_device_variables['interfaces'].append({
                'name': intf.name,
                'ifclass': intf.configtype.name,
                'untagged_vlan': untagged_vlan,
                'tagged_vlan_list': tagged_vlan_list,
                'data': intfdata,
                'indexnum': ifindexnum
            })

        mlag_vars = get_mlag_vars(session, dev)
        device_variables = {**device_variables,
                            **access_device_variables,
                            **mlag_vars}
    elif devtype == DeviceType.DIST or devtype == DeviceType.CORE:
        infra_ip = dev.infra_ip
        asn = generate_asn(infra_ip)
        fabric_device_variables = {
            'interfaces': [],
            'bgp_ipv4_peers': [],
            'bgp_evpn_peers': [],
            'mgmtdomains': [],
            'asn': asn
        }
        if mgmt_ip and infra_ip:
            mgmt_device_variables = {
                'mgmt_ipif': str(IPv4Interface('{}/32'.format(mgmt_ip))),
                'mgmt_prefixlen': 32,
                'infra_ipif': str(IPv4Interface('{}/32'.format(infra_ip))),
                'infra_ip': str(infra_ip),
            }
            fabric_device_variables = {**fabric_device_variables,
                                       **mgmt_device_variables}
        # find fabric neighbors
        fabric_interfaces = {}
        for neighbor_d in dev.get_neighbors(session):
            if neighbor_d.device_type == DeviceType.DIST or \
                    neighbor_d.device_type == DeviceType.CORE:
                # TODO: support multiple links to the same neighbor?
                local_if = dev.get_neighbor_local_ifname(session, neighbor_d)
                local_ipif = dev.get_neighbor_local_ipif(session, neighbor_d)
                neighbor_ip = dev.get_neighbor_ip(session, neighbor_d)
                if local_if:
                    fabric_interfaces[local_if] = {
                        'name': local_if,
                        'ifclass': 'fabric',
                        'ipv4if': local_ipif,
                        'peer_hostname': neighbor_d.hostname,
                        'peer_infra_lo': str(neighbor_d.infra_ip),
                        'peer_ip': str(neighbor_ip),
                        'peer_asn': generate_asn(neighbor_d.infra_ip)
                    }
                    fabric_device_variables['bgp_ipv4_peers'].append({
                        'peer_hostname': neighbor_d.hostname,
                        'peer_infra_lo': str(neighbor_d.infra_ip),
                        'peer_ip': str(neighbor_ip),
                        'peer_asn': generate_asn(neighbor_d.infra_ip)
                    })
        ifname_peer_map = dev.get_linknet_localif_mapping(session)
        if 'interfaces' in settings and settings['interfaces']:
            for intf in settings['interfaces']:
                try:
                    ifindexnum: int = Interface.interface_index_num(intf['name'])
                except ValueError:
                    # Interface name has no parseable index; use 0 as fallback
                    ifindexnum: int = 0
                if 'ifclass' not in intf:
                    continue
                if intf['ifclass'] == 'downlink':
                    data = {}
                    if intf['name'] in ifname_peer_map:
                        data['description'] = ifname_peer_map[intf['name']]
                    fabric_device_variables['interfaces'].append({
                        'name': intf['name'],
                        'ifclass': intf['ifclass'],
                        'indexnum': ifindexnum,
                        'data': data
                    })
                elif intf['ifclass'] == 'custom':
                    fabric_device_variables['interfaces'].append({
                        'name': intf['name'],
                        'ifclass': intf['ifclass'],
                        'config': intf['config'],
                        'indexnum': ifindexnum
                    })
                elif intf['ifclass'] == 'fabric':
                    if intf['name'] in fabric_interfaces:
                        fabric_device_variables['interfaces'].append(
                            {**fabric_interfaces[intf['name']],
                             **{'indexnum': ifindexnum}}
                        )
                        del fabric_interfaces[intf['name']]
                    else:
                        # Fabric interface with no discovered neighbor:
                        # emit a placeholder so ZTP of the peer can proceed
                        fabric_device_variables['interfaces'].append({
                            'name': intf['name'],
                            'ifclass': intf['ifclass'],
                            'indexnum': ifindexnum,
                            'ipv4if': None,
                            'peer_hostname': 'ztp',
                            'peer_infra_lo': None,
                            'peer_ip': None,
                            'peer_asn': None
                        })
        for local_if, data in fabric_interfaces.items():
            # Fixed deprecated logger.warn alias -> logger.warning
            logger.warning(f"Interface {local_if} on device {hostname} not "
                           "configured as linknet because of wrong ifclass")
        if not ztp_hostname:
            for mgmtdom in cnaas_nms.db.helper.get_all_mgmtdomains(session, hostname):
                fabric_device_variables['mgmtdomains'].append({
                    'id': mgmtdom.id,
                    'ipv4_gw': mgmtdom.ipv4_gw,
                    'vlan': mgmtdom.vlan,
                    'description': mgmtdom.description,
                    'esi_mac': mgmtdom.esi_mac
                })
        # populate evpn peers data
        for neighbor_d in get_evpn_peers(session, settings):
            if neighbor_d.hostname == dev.hostname:
                continue
            fabric_device_variables['bgp_evpn_peers'].append({
                'peer_hostname': neighbor_d.hostname,
                'peer_infra_lo': str(neighbor_d.infra_ip),
                'peer_asn': generate_asn(neighbor_d.infra_ip)
            })
        device_variables = {**device_variables,
                            **fabric_device_variables}

    # Add all environment variables starting with TEMPLATE_SECRET_ to
    # the list of configuration variables. The idea is to store secret
    # configuration outside of the templates repository.
    template_secrets = {}
    for env in os.environ:
        if env.startswith('TEMPLATE_SECRET_'):
            template_secrets[env] = os.environ[env]
    # Merge all dicts with variables into one, later row overrides
    # Device variables override any names from settings, for example the
    # interfaces list from settings are replaced with an interface list from
    # device variables that contains more information
    device_variables = {**settings,
                        **device_variables,
                        **template_secrets}
    return device_variables
def push_sync_device(task, dry_run: bool = True):
    """Nornir task: render the device's config from templates and push it
    with NAPALM config-replace.

    Args:
        task: nornir task object, supplied by nornir .run()
        dry_run: If True, only compare/diff against running config
    Raises:
        ValueError: if the device platform is not a string
        Exception: if management domain or management IP can't be found
        RepoStructureException: if the platform mapping file is missing
    """
    hostname = task.host.name
    with sqla_session() as session:
        dev: Device = session.query(Device).filter(
            Device.hostname == hostname).one()
        mgmt_ip = dev.management_ip
        devtype: DeviceType = dev.device_type
        if isinstance(dev.platform, str):
            platform: str = dev.platform
        else:
            raise ValueError("Unknown platform: {}".format(dev.platform))
        # Management domain is resolved via the device's uplink peers
        neighbor_hostnames = dev.get_uplink_peers(session)
        mgmtdomain = cnaas_nms.db.helper.find_mgmtdomain(
            session, neighbor_hostnames)
        if not mgmtdomain:
            raise Exception(
                "Could not find appropriate management domain for uplink peer devices: {}"
                .format(neighbor_hostnames))
        if not mgmt_ip:
            raise Exception(
                "Could not find management IP for device {}".format(hostname))
        mgmt_gw_ipif = IPv4Interface(mgmtdomain.ipv4_gw)
        # Classify interfaces into uplinks / access-auto lists for templates
        intfs = session.query(Interface).filter(Interface.device == dev).all()
        uplinks = []
        access_auto = []
        intf: Interface
        for intf in intfs:
            if intf.configtype == InterfaceConfigType.ACCESS_AUTO:
                access_auto.append({'ifname': intf.name})
            elif intf.configtype == InterfaceConfigType.ACCESS_UPLINK:
                uplinks.append({'ifname': intf.name})
        device_variables = {
            'mgmt_ipif': str(
                IPv4Interface('{}/{}'.format(mgmt_ip,
                                             mgmt_gw_ipif.network.prefixlen))),
            'mgmt_ip': str(mgmt_ip),
            'mgmt_prefixlen': int(mgmt_gw_ipif.network.prefixlen),
            'uplinks': uplinks,
            'access_auto': access_auto,
            'mgmt_vlan_id': mgmtdomain.vlan,
            'mgmt_gw': mgmt_gw_ipif.ip
        }
    settings, settings_origin = get_settings(hostname, devtype)
    # Merge dicts
    template_vars = {**device_variables, **settings}
    logger.debug("Synchronize device config for host: {}".format(
        task.host.name))

    # Resolve local path of the templates repository from daemon config
    with open('/etc/cnaas-nms/repository.yml', 'r') as db_file:
        repo_config = yaml.safe_load(db_file)
        local_repo_path = repo_config['templates_local']

    # Per-platform mapping.yml names the entrypoint template per devtype
    mapfile = os.path.join(local_repo_path, platform, 'mapping.yml')
    if not os.path.isfile(mapfile):
        raise RepoStructureException(
            "File {} not found in template repo".format(mapfile))
    with open(mapfile, 'r') as f:
        mapping = yaml.safe_load(f)
        template = mapping[devtype.name]['entrypoint']

    r = task.run(task=text.template_file,
                 name="Generate device config",
                 template=template,
                 path=f"{local_repo_path}/{task.host.platform}",
                 **template_vars)

    # TODO: Handle template not found, variables not defined
    # jinja2.exceptions.UndefinedError

    task.host["config"] = r.result

    task.run(task=networking.napalm_configure,
             name="Sync device config",
             replace=True,
             configuration=task.host["config"],
             dry_run=dry_run)
def _refresh_repo_task(repo_type: RepoType = RepoType.TEMPLATES) -> str:
    """Should only be called by refresh_repo function.

    Pulls (or clones) the templates or settings git repository, then marks
    affected device types / devices as unsynchronized based on the set of
    files that changed.

    Args:
        repo_type: which repository (TEMPLATES or SETTINGS) to refresh
    Returns:
        Human-readable summary of the commits pulled or the clone result.
    Raises:
        ValueError: on unknown repo_type
        ConfigException: if the remote repository can't be cloned
        SettingsSyntaxError, VlanConflictError: on invalid settings content
    """
    with open('/etc/cnaas-nms/repository.yml', 'r') as db_file:
        repo_config = yaml.safe_load(db_file)

    if repo_type == RepoType.TEMPLATES:
        local_repo_path = repo_config['templates_local']
        remote_repo_path = repo_config['templates_remote']
    elif repo_type == RepoType.SETTINGS:
        local_repo_path = repo_config['settings_local']
        remote_repo_path = repo_config['settings_remote']
    else:
        raise ValueError("Invalid repository")

    ret = ''
    changed_files: Set[str] = set()
    try:
        # Pull existing local repo and collect the set of files changed
        # by each pulled commit
        local_repo = Repo(local_repo_path)
        prev_commit = local_repo.commit().hexsha
        diff = local_repo.remotes.origin.pull()
        for item in diff:
            ret += 'Commit {} by {} at {}\n'.format(
                item.commit.name_rev, item.commit.committer,
                item.commit.committed_datetime)
            diff_files = local_repo.git.diff('{}..{}'.format(
                prev_commit, item.commit.hexsha), name_only=True).split()
            changed_files.update(diff_files)
            prev_commit = item.commit.hexsha
    except (InvalidGitRepositoryError, NoSuchPathError) as e:
        # No usable local copy: clone fresh from the remote instead
        logger.info("Local repository {} not found, cloning from remote".\
            format(local_repo_path))
        try:
            local_repo = Repo.clone_from(remote_repo_path, local_repo_path)
        except NoSuchPathError as e:
            raise ConfigException("Invalid remote repository {}: {}".format(
                remote_repo_path, str(e)))
        except GitCommandError as e:
            raise ConfigException(
                "Error cloning remote repository {}: {}".format(
                    remote_repo_path, str(e)))
        ret = 'Cloned new from remote. Last commit {} by {} at {}'.format(
            local_repo.head.commit.name_rev,
            local_repo.head.commit.committer,
            local_repo.head.commit.committed_datetime)

    if repo_type == RepoType.SETTINGS:
        try:
            # Invalidate cached settings, then re-parse everything to
            # validate the new repo content before marking devices unsynced
            logger.debug("Clearing redis-lru cache for settings")
            with redis_session() as redis_db:
                cache = RedisLRU(redis_db)
                cache.clear_all_cache()
            get_settings()
            test_devtypes = [
                DeviceType.ACCESS, DeviceType.DIST, DeviceType.CORE
            ]
            for devtype in test_devtypes:
                get_settings(device_type=devtype)
            # Validate per-device settings for every device directory
            for hostname in os.listdir(os.path.join(local_repo_path, 'devices')):
                hostname_path = os.path.join(local_repo_path, 'devices', hostname)
                if not os.path.isdir(hostname_path) or hostname.startswith(
                        '.'):
                    continue
                if not Device.valid_hostname(hostname):
                    continue
                get_settings(hostname)
            check_settings_collisions()
        except SettingsSyntaxError as e:
            logger.exception("Error in settings repo configuration: {}".format(
                str(e)))
            raise e
        except VlanConflictError as e:
            logger.exception("VLAN conflict in repo configuration: {}".format(
                str(e)))
            raise e
        logger.debug(
            "Files changed in settings repository: {}".format(changed_files))
        # Map changed files to affected device types and hostnames
        updated_devtypes, updated_hostnames = settings_syncstatus(
            updated_settings=changed_files)
        logger.debug(
            "Devicestypes to be marked unsynced after repo refresh: {}".format(
                ', '.join([dt.name for dt in updated_devtypes])))
        logger.debug(
            "Devices to be marked unsynced after repo refresh: {}".format(
                ', '.join(updated_hostnames)))
        with sqla_session() as session:
            devtype: DeviceType
            for devtype in updated_devtypes:
                Device.set_devtype_syncstatus(session, devtype, syncstatus=False)
            for hostname in updated_hostnames:
                dev: Device = session.query(Device).\
                    filter(Device.hostname == hostname).one_or_none()
                if dev:
                    dev.synchronized = False
                else:
                    logger.warn(
                        "Settings updated for unknown device: {}".format(
                            hostname))
    if repo_type == RepoType.TEMPLATES:
        logger.debug(
            "Files changed in template repository: {}".format(changed_files))
        # Map changed templates to (devtype, platform) pairs to mark unsynced
        updated_devtypes = template_syncstatus(updated_templates=changed_files)
        updated_list = [
            '{}:{}'.format(platform, dt.name) for dt, platform in updated_devtypes
        ]
        logger.debug(
            "Devicestypes to be marked unsynced after repo refresh: {}".format(
                ', '.join(updated_list)))
        with sqla_session() as session:
            devtype: DeviceType
            for devtype, platform in updated_devtypes:
                Device.set_devtype_syncstatus(session, devtype, platform,
                                              syncstatus=False)
    return ret
def push_sync_device(task, dry_run: bool = True, generate_only: bool = False,
                     job_id: Optional[str] = None,
                     scheduled_by: Optional[str] = None):
    """
    Nornir task to generate config and push to device

    Args:
        task: nornir task, sent by nornir when doing .run()
        dry_run: Don't commit config to device, just do compare/diff
        generate_only: Only generate text config, don't try to commit or
                       even do dry_run compare to running config

    Returns:

    """
    set_thread_data(job_id)
    logger = get_logger()
    hostname = task.host.name
    with sqla_session() as session:
        dev: Device = session.query(Device).filter(
            Device.hostname == hostname).one()
        mgmt_ip = dev.management_ip
        infra_ip = dev.infra_ip
        if not mgmt_ip:
            raise Exception(
                "Could not find management IP for device {}".format(hostname))
        devtype: DeviceType = dev.device_type
        if isinstance(dev.platform, str):
            platform: str = dev.platform
        else:
            raise ValueError("Unknown platform: {}".format(dev.platform))
        settings, settings_origin = get_settings(hostname, devtype)
        device_variables = {
            'mgmt_ip': str(mgmt_ip),
            'device_model': dev.model,
            'device_os_version': dev.os_version
        }
        if devtype == DeviceType.ACCESS:
            # ACCESS devices get management domain variables and a
            # per-interface list built from the database
            mgmtdomain = cnaas_nms.db.helper.find_mgmtdomain_by_ip(
                session, dev.management_ip)
            if not mgmtdomain:
                raise Exception(
                    "Could not find appropriate management domain for management_ip: {}"
                    .format(dev.management_ip))
            mgmt_gw_ipif = IPv4Interface(mgmtdomain.ipv4_gw)
            access_device_variables = {
                'mgmt_vlan_id': mgmtdomain.vlan,
                'mgmt_gw': str(mgmt_gw_ipif.ip),
                'mgmt_ipif': str(
                    IPv4Interface('{}/{}'.format(
                        mgmt_ip, mgmt_gw_ipif.network.prefixlen))),
                'mgmt_prefixlen': int(mgmt_gw_ipif.network.prefixlen),
                'interfaces': []
            }

            intfs = session.query(Interface).filter(
                Interface.device == dev).all()
            intf: Interface
            for intf in intfs:
                untagged_vlan = None
                tagged_vlan_list = []
                intfdata = None
                if intf.data:
                    # Resolve VLAN names from interface data to VLAN ids
                    # using the vxlans section of settings
                    if 'untagged_vlan' in intf.data:
                        untagged_vlan = resolve_vlanid(
                            intf.data['untagged_vlan'], settings['vxlans'])
                    if 'tagged_vlan_list' in intf.data:
                        tagged_vlan_list = resolve_vlanid_list(
                            intf.data['tagged_vlan_list'], settings['vxlans'])
                    intfdata = dict(intf.data)
                access_device_variables['interfaces'].append({
                    'name': intf.name,
                    'ifclass': intf.configtype.name,
                    'untagged_vlan': untagged_vlan,
                    'tagged_vlan_list': tagged_vlan_list,
                    'data': intfdata
                })
            mlag_vars = get_mlag_vars(session, dev)
            device_variables = {
                **access_device_variables,
                **device_variables,
                **mlag_vars
            }
        elif devtype == DeviceType.DIST or devtype == DeviceType.CORE:
            # Fabric devices (DIST/CORE) get BGP/underlay variables
            asn = generate_asn(infra_ip)
            fabric_device_variables = {
                'mgmt_ipif': str(IPv4Interface('{}/32'.format(mgmt_ip))),
                'mgmt_prefixlen': 32,
                'infra_ipif': str(IPv4Interface('{}/32'.format(infra_ip))),
                'infra_ip': str(infra_ip),
                'interfaces': [],
                'bgp_ipv4_peers': [],
                'bgp_evpn_peers': [],
                'mgmtdomains': [],
                'asn': asn
            }
            ifname_peer_map = dev.get_linknet_localif_mapping(session)
            if 'interfaces' in settings and settings['interfaces']:
                for intf in settings['interfaces']:
                    try:
                        ifindexnum: int = Interface.interface_index_num(
                            intf['name'])
                    except ValueError as e:
                        # No parseable index in name; fall back to 0
                        ifindexnum: int = 0
                    if 'ifclass' in intf and intf['ifclass'] == 'downlink':
                        data = {}
                        if intf['name'] in ifname_peer_map:
                            data['description'] = ifname_peer_map[intf['name']]
                        fabric_device_variables['interfaces'].append({
                            'name': intf['name'],
                            'ifclass': intf['ifclass'],
                            'indexnum': ifindexnum,
                            'data': data
                        })
                    elif 'ifclass' in intf and intf['ifclass'] == 'custom':
                        fabric_device_variables['interfaces'].append({
                            'name': intf['name'],
                            'ifclass': intf['ifclass'],
                            'config': intf['config'],
                            'indexnum': ifindexnum
                        })
            for mgmtdom in cnaas_nms.db.helper.get_all_mgmtdomains(
                    session, hostname):
                fabric_device_variables['mgmtdomains'].append({
                    'id': mgmtdom.id,
                    'ipv4_gw': mgmtdom.ipv4_gw,
                    'vlan': mgmtdom.vlan,
                    'description': mgmtdom.description,
                    'esi_mac': mgmtdom.esi_mac
                })
            # find fabric neighbors
            fabric_links = []
            for neighbor_d in dev.get_neighbors(session):
                if neighbor_d.device_type == DeviceType.DIST or \
                        neighbor_d.device_type == DeviceType.CORE:
                    # TODO: support multiple links to the same neighbor?
                    local_if = dev.get_neighbor_local_ifname(
                        session, neighbor_d)
                    local_ipif = dev.get_neighbor_local_ipif(
                        session, neighbor_d)
                    neighbor_ip = dev.get_neighbor_ip(session, neighbor_d)
                    if local_if:
                        fabric_device_variables['interfaces'].append({
                            'name': local_if,
                            'ifclass': 'fabric',
                            'ipv4if': local_ipif,
                            'peer_hostname': neighbor_d.hostname,
                            'peer_infra_lo': str(neighbor_d.infra_ip),
                            'peer_ip': str(neighbor_ip),
                            'peer_asn': generate_asn(neighbor_d.infra_ip)
                        })
                        fabric_device_variables['bgp_ipv4_peers'].append({
                            'peer_hostname': neighbor_d.hostname,
                            'peer_infra_lo': str(neighbor_d.infra_ip),
                            'peer_ip': str(neighbor_ip),
                            'peer_asn': generate_asn(neighbor_d.infra_ip)
                        })
            # populate evpn peers data
            for neighbor_d in get_evpn_spines(session, settings):
                if neighbor_d.hostname == dev.hostname:
                    continue
                fabric_device_variables['bgp_evpn_peers'].append({
                    'peer_hostname': neighbor_d.hostname,
                    'peer_infra_lo': str(neighbor_d.infra_ip),
                    'peer_asn': generate_asn(neighbor_d.infra_ip)
                })
            device_variables = {**fabric_device_variables,
                                **device_variables}

    # Add all environment variables starting with TEMPLATE_SECRET_ to
    # the list of configuration variables. The idea is to store secret
    # configuration outside of the templates repository.
    template_secrets = {}
    for env in os.environ:
        if env.startswith('TEMPLATE_SECRET_'):
            template_secrets[env] = os.environ[env]

    # Merge device variables with settings before sending to template rendering
    # Device variables override any names from settings, for example the
    # interfaces list from settings are replaced with an interface list from
    # device variables that contains more information
    template_vars = {**settings, **device_variables, **template_secrets}

    with open('/etc/cnaas-nms/repository.yml', 'r') as db_file:
        repo_config = yaml.safe_load(db_file)
        local_repo_path = repo_config['templates_local']

    mapfile = os.path.join(local_repo_path, platform, 'mapping.yml')
    if not os.path.isfile(mapfile):
        raise RepoStructureException(
            "File {} not found in template repo".format(mapfile))
    with open(mapfile, 'r') as f:
        mapping = yaml.safe_load(f)
        template = mapping[devtype.name]['entrypoint']

    logger.debug("Generate config for host: {}".format(task.host.name))
    r = task.run(task=text.template_file,
                 name="Generate device config",
                 template=template,
                 path=f"{local_repo_path}/{task.host.platform}",
                 **template_vars)

    # TODO: Handle template not found, variables not defined
    # jinja2.exceptions.UndefinedError

    task.host["config"] = r.result
    task.host["template_vars"] = template_vars

    if generate_only:
        task.host["change_score"] = 0
    else:
        logger.debug("Synchronize device config for host: {} ({}:{})".format(
            task.host.name, task.host.hostname, task.host.port))

        task.host.open_connection("napalm", configuration=task.nornir.config)
        task.run(task=networking.napalm_configure,
                 name="Sync device config",
                 replace=True,
                 configuration=task.host["config"],
                 dry_run=dry_run)
        task.host.close_connection("napalm")

        # task.results[1] is the napalm_configure result (index 0 is the
        # template rendering); score the diff it produced
        if task.results[1].diff:
            config = task.results[1].host["config"]
            diff = task.results[1].diff
            task.host["change_score"] = calculate_score(config, diff)
        else:
            task.host["change_score"] = 0
    if job_id:
        with redis_session() as db:
            db.lpush('finished_devices_' + str(job_id), task.host.name)
def test_get_settings_device(self):
    """Settings resolved for a specific DIST device must include all
    required top-level keys."""
    settings, _ = get_settings(
        hostname=self.testdata['testdevice'],
        device_type=DeviceType.DIST)
    # Assert that all required settings are set
    missing = [key for key in self.required_setting_keys
               if key not in settings]
    self.assertTrue(not missing)
def test_get_settings_global(self):
    """Global settings (no hostname or device type given) must include
    all required top-level keys."""
    settings, _ = get_settings()
    # Assert that all required settings are set
    missing = [key for key in self.required_setting_keys
               if key not in settings]
    self.assertTrue(not missing)