def post(self):
    """ Add a device

    Validates POSTed JSON, rejects duplicates and unsupported platforms,
    auto-allocates management/infra loopback IPs for DIST/CORE devices,
    then creates the Device row.
    """
    json_data = request.get_json()
    supported_platforms = ['eos', 'junos', 'ios', 'iosxr', 'nxos', 'nxos_ssh']
    # Device.validate returns (parsed_data, list_of_errors); no need to
    # pre-initialize data/errors (the old code did so redundantly)
    data, errors = Device.validate(**json_data)
    if errors != []:
        return empty_result(status='error', data=errors), 400
    with sqla_session() as session:
        instance: Device = session.query(Device).filter(
            Device.hostname == data['hostname']).one_or_none()
        if instance:
            errors.append('Device already exists')
            return empty_result(status='error', data=errors), 400
        if 'platform' not in data or data['platform'] not in supported_platforms:
            errors.append("Device platform not specified or not known (must be any of: {})".
                          format(', '.join(supported_platforms)))
            return empty_result(status='error', data=errors), 400
        if data['device_type'] in ['DIST', 'CORE']:
            # Fabric devices get loopback addresses allocated automatically
            # when not supplied by the caller
            if 'management_ip' not in data or not data['management_ip']:
                data['management_ip'] = cnaas_nms.confpush.underlay.find_free_mgmt_lo_ip(session)
            if 'infra_ip' not in data or not data['infra_ip']:
                data['infra_ip'] = cnaas_nms.confpush.underlay.find_free_infra_ip(session)
        new_device = Device.device_create(**data)
        session.add(new_device)
        # flush so new_device is assigned its primary key before as_dict()
        session.flush()
        return empty_result(status='success', data={"added_device": new_device.as_dict()}), 200
def arg_check(cls, device_id: int, json_data: dict) -> dict:
    """Validate device-init POST arguments and return them parsed.

    Raises:
        ValueError: on any invalid or missing argument
    """
    result = {'device_id': device_id}
    if not isinstance(device_id, int):
        raise ValueError("'device_id' must be an integer")

    if 'hostname' not in json_data:
        raise ValueError("POST data must include new 'hostname'")
    if not Device.valid_hostname(json_data['hostname']):
        raise ValueError("Provided hostname is not valid")
    result['new_hostname'] = json_data['hostname']

    if 'device_type' not in json_data:
        raise ValueError("POST data must include 'device_type'")
    try:
        devtype = str(json_data['device_type']).upper()
    except Exception:
        raise ValueError("'device_type' must be a string")
    if not DeviceType.has_name(devtype):
        raise ValueError("Invalid 'device_type' provided")
    result['device_type'] = devtype

    # MLAG peer arguments must come as a pair
    mlag_keys = ('mlag_peer_id', 'mlag_peer_hostname')
    if any(key in json_data for key in mlag_keys):
        if not all(key in json_data for key in mlag_keys):
            raise ValueError(
                "Both 'mlag_peer_id' and 'mlag_peer_hostname' must be specified"
            )
        if not isinstance(json_data['mlag_peer_id'], int):
            raise ValueError("'mlag_peer_id' must be an integer")
        if not Device.valid_hostname(json_data['mlag_peer_hostname']):
            raise ValueError("Provided 'mlag_peer_hostname' is not valid")
        result['mlag_peer_id'] = json_data['mlag_peer_id']
        result['mlag_peer_new_hostname'] = json_data['mlag_peer_hostname']

    if json_data.get('neighbors') is not None:
        if not isinstance(json_data['neighbors'], list):
            raise ValueError(
                "Neighbors must be specified as either a list of hostnames,"
                "an empty list, or not specified at all")
        for peer_hostname in json_data['neighbors']:
            if not Device.valid_hostname(peer_hostname):
                raise ValueError(
                    "Invalid hostname specified in neighbor list")
        result['neighbors'] = json_data['neighbors']
    else:
        result['neighbors'] = None
    return result
def post(self):
    """Add a device.

    Returns HTTP 400 with error details on validation failure or when
    the device already exists (previously this returned 404 / a bare
    error list, inconsistent with the rest of the API).
    """
    json_data = request.get_json()
    data, errors = Device.validate(**json_data)
    if errors != []:
        # Validation failure is a client error (400), not "not found"
        return empty_result(status='error', data=errors), 400
    with sqla_session() as session:
        instance: Device = session.query(Device).filter(
            Device.hostname == data['hostname']).one_or_none()
        if instance is not None:
            errors.append('Device already exists')
            # Wrap errors in the standard envelope with an HTTP status
            return empty_result(status='error', data=errors), 400
    Device.device_add(**json_data)
    return empty_result(status='success'), 200
def verify_dir_structure(path: str, dir_structure: dict):
    """Verify that given path complies to given directory structure.
    Raises:
        VerifyPathException
    """
    for item, subitem in dir_structure.items():
        if isinstance(subitem, str) and subitem == 'file':
            filename = os.path.join(path, item)
            if not os.path.isfile(filename):
                if os.path.exists(filename):
                    # Path exists but is not a regular file (e.g. a directory);
                    # include the offending path in the message (was "(unknown)")
                    raise VerifyPathException(
                        f"{filename} is not a regular file")
                else:
                    raise VerifyPathException(f"File {filename} not found")
        elif item is Device:
            # The Device class used as a template key means "one
            # subdirectory per device hostname"; verify each one
            for hostname in os.listdir(path):
                hostname_path = os.path.join(path, hostname)
                if not os.path.isdir(hostname_path) or hostname.startswith('.'):
                    continue
                if not Device.valid_hostname(hostname):
                    continue
                verify_dir_structure(hostname_path, subitem)
        else:
            dirname = os.path.join(path, item)
            if not os.path.isdir(dirname):
                if os.path.exists(dirname):
                    raise VerifyPathException(f"{dirname} is not a directory")
                else:
                    raise VerifyPathException(f"Directory {dirname} not found")
            # Recurse into nested structure definitions
            if subitem:
                verify_dir_structure(os.path.join(path, item), dir_structure[item])
def post(self, hostname: str):
    """Restore configuration to previous version"""
    json_data = request.get_json()
    apply_kwargs = {'hostname': hostname}
    config = None
    if not Device.valid_hostname(hostname):
        return empty_result(status='error', data=f"Invalid hostname specified"), 400
    # job_id selects which historical job's config to restore; required
    if 'job_id' in json_data:
        try:
            job_id = int(json_data['job_id'])
        except Exception:
            return empty_result('error', "job_id must be an integer"), 400
    else:
        return empty_result('error', "job_id must be specified"), 400
    with sqla_session() as session:
        try:
            prev_config_result = Job.get_previous_config(session, hostname, job_id=job_id)
            failed = prev_config_result['failed']
            if not failed and 'config' in prev_config_result:
                config = prev_config_result['config']
        except JobNotFoundError as e:
            return empty_result('error', str(e)), 404
        except InvalidJobError as e:
            return empty_result('error', str(e)), 500
        except Exception as e:
            return empty_result('error', "Unhandled exception: {}".format(e)), 500
    # Refuse to restore config captured by a job that failed
    if failed:
        return empty_result(
            'error', "The specified job_id has a failed status"), 400
    if not config:
        return empty_result('error', "No config found in this job"), 500
    # dry_run defaults to True; only an explicit boolean false disables it
    if 'dry_run' in json_data and isinstance(json_data['dry_run'], bool) \
            and not json_data['dry_run']:
        apply_kwargs['dry_run'] = False
    else:
        apply_kwargs['dry_run'] = True
    apply_kwargs['config'] = config
    scheduler = Scheduler()
    job_id = scheduler.add_onetime_job(
        'cnaas_nms.confpush.sync_devices:apply_config',
        when=1,
        scheduled_by=get_jwt_identity(),
        kwargs=apply_kwargs,
    )
    res = empty_result(data=f"Scheduled job to restore {hostname}")
    res['job_id'] = job_id
    return res, 200
def get(self):
    """ Get settings """
    args = request.args
    hostname = None
    device_type = None
    model = None
    if 'hostname' in args:
        if Device.valid_hostname(args['hostname']):
            hostname = args['hostname']
        else:
            return empty_result('error', "Invalid hostname specified"), 400
        # Look up device_type and model from the database for this hostname
        with sqla_session() as session:
            dev: Device = session.query(Device).\
                filter(Device.hostname == hostname).one_or_none()
            if dev:
                device_type = dev.device_type
                model = dev.model
            else:
                return empty_result('error', "Hostname not found in database"), 400
    if 'device_type' in args:
        # An explicit device_type query arg overrides the one from the database
        if DeviceType.has_name(args['device_type'].upper()):
            device_type = DeviceType[args['device_type'].upper()]
        else:
            return empty_result('error', "Invalid device type specified"), 400
    try:
        settings, settings_origin = get_settings(hostname, device_type, model)
    except Exception as e:
        return empty_result('error', "Error getting settings: {}".format(str(e))), 400
    return empty_result(data={'settings': settings, 'settings_origin': settings_origin})
def get(self, hostname: str):
    """ Get device configuration """
    if not Device.valid_hostname(hostname):
        return empty_result(status='error', data=f"Invalid hostname specified"), 400
    try:
        generated_config, template_vars = \
            cnaas_nms.confpush.sync_devices.generate_only(hostname)
    except Exception as e:
        logger.exception(
            f"Exception while generating config for device {hostname}")
        return empty_result(
            status='error',
            data="Exception while generating config for device {}: {} {}".
            format(hostname, type(e), str(e))), 500
    # Build the success response only after generation succeeded
    result = empty_result()
    result['data'] = {'config': {
        'hostname': hostname,
        'generated_config': generated_config,
        'available_variables': template_vars
    }}
    return result
def settings_syncstatus(
        updated_settings: set) -> Tuple[Set[DeviceType], Set[str]]:
    """Determine what devices has become unsynchronized after updating
    the settings repository.

    Args:
        updated_settings: set of changed file paths (relative to repo root)
    Returns:
        Tuple of (set of unsynced device types, set of unsynced hostnames)
    """
    unsynced_devtypes = set()
    unsynced_hostnames = set()
    filename: str
    for filename in updated_settings:
        basedir = filename.split(os.path.sep)[0]
        # Ignore files outside the known settings layout
        if basedir not in DIR_STRUCTURE:
            continue
        if basedir.startswith('global'):
            # Global settings affect all device types; short-circuit
            return {DeviceType.ACCESS, DeviceType.DIST, DeviceType.CORE}, set()
        elif basedir.startswith('fabric'):
            unsynced_devtypes.update({DeviceType.DIST, DeviceType.CORE})
        elif basedir.startswith('access'):
            unsynced_devtypes.add(DeviceType.ACCESS)
        elif basedir.startswith('dist'):
            unsynced_devtypes.add(DeviceType.DIST)
        elif basedir.startswith('core'):
            unsynced_devtypes.add(DeviceType.CORE)
        elif basedir.startswith('devices'):
            # devices/<hostname>/... affects a single device
            try:
                hostname = filename.split(os.path.sep)[1]
                if Device.valid_hostname(hostname):
                    unsynced_hostnames.add(hostname)
            except Exception as e:
                logger.exception(
                    "Error in settings devices directory: {}".format(str(e)))
        else:
            # logger.warn is a deprecated alias; use logger.warning
            logger.warning(
                "Unhandled settings file found {}, syncstatus not updated".
                format(filename))
    return (unsynced_devtypes, unsynced_hostnames)
def sync_check_hash(task, force=False, job_id=None):
    """
    Start the task which will compare device configuration hashes.

    Args:
        task: Nornir task
        force: Ignore device hash
    Raises:
        Exception: if the running config could not be retrieved or the
            device configuration was changed outside of CNaaS
    """
    set_thread_data(job_id)
    logger = get_logger()
    if force is True:
        return
    with sqla_session() as session:
        stored_hash = Device.get_config_hash(session, task.host.name)
    # No stored hash means nothing to compare against
    if stored_hash is None:
        return
    task.host.open_connection("napalm", configuration=task.nornir.config)
    res = task.run(task=napalm_get, getters=["config"])
    task.host.close_connection("napalm")
    running_config = dict(res.result)['config']['running']
    # Check for a missing config BEFORE encoding; the previous code
    # encoded first, which made the None-check unreachable (encode()
    # would have raised AttributeError on None)
    if running_config is None:
        raise Exception('Failed to get running configuration')
    hash_obj = sha256(running_config.encode())
    running_hash = hash_obj.hexdigest()
    if stored_hash != running_hash:
        raise Exception('Device {} configuration is altered outside of CNaaS!'.format(task.host.name))
def find_mgmtdomain(session, hostnames: List[str]) -> Optional[Mgmtdomain]:
    """Find the corresponding management domain for a pair of
    distribution switches.

    Args:
        hostnames: A list of two hostnames for the distribution switches
    Raises:
        ValueError: On invalid hostnames etc
    """
    if not isinstance(hostnames, list) or not len(hostnames) == 2:
        raise ValueError(
            "hostnames argument must be a list with two device hostnames")
    for hostname in hostnames:
        if not Device.valid_hostname(hostname):
            raise ValueError(f"Argument {hostname} is not a valid hostname")
    # Resolve both hostnames to Device rows; missing rows are a ValueError
    devices = []
    for hostname in hostnames:
        try:
            devices.append(
                session.query(Device).filter(
                    Device.hostname == hostname).one())
        except NoResultFound:
            raise ValueError(
                f"hostname {hostname} not found in device database")
    device0, device1 = devices
    # The pair may be stored in either (a, b) order
    return session.query(Mgmtdomain).\
        filter(
            ((Mgmtdomain.device_a == device0) & (Mgmtdomain.device_b == device1))
            |
            ((Mgmtdomain.device_a == device1) & (Mgmtdomain.device_b == device0))
        ).one_or_none()
def update_config_hash(task):
    """Fetch the running config from a device and store its hash in the database."""
    logger = get_logger()
    try:
        res = task.run(task=napalm_get, getters=["config"])
        # Sanity-check the Nornir result structure before dereferencing it
        if not isinstance(res, MultiResult) or len(res) != 1 or not isinstance(res[0].result, dict) \
                or 'config' not in res[0].result:
            raise Exception("Unable to get config from device")
        new_config_hash = calc_config_hash(task.host.name, res[0].result['config']['running'])
        if not new_config_hash:
            raise ValueError("Empty config hash")
    except Exception as e:
        logger.exception("Unable to get config hash: {}".format(str(e)))
        raise e
    else:
        # Only persist the hash when retrieval and hashing both succeeded
        with sqla_session() as session:
            Device.set_config_hash(session, task.host.name, new_config_hash)
            logger.debug("Config hash for {} updated to {}".format(task.host.name, new_config_hash))
def set_facts(dev: Device, facts: dict) -> dict:
    """Update a Device object with facts gathered from the device.

    Args:
        dev: Device object to update in-place
        facts: NAPALM get_facts dict
    Returns:
        dict of changed attributes: {member: {'old': ..., 'new': ...}}
    """
    attr_map = {
        # Map NAPALM getfacts name -> device.Device member name
        'vendor': 'vendor',
        'model': 'model',
        'os_version': 'os_version',
        'serial_number': 'serial',
    }
    diff = {}
    # Update any attributes that has changed
    for dict_key, obj_member in attr_map.items():
        # getattr/setattr instead of explicit __getattribute__/__setattr__
        # dunder calls (idiomatic, same semantics)
        obj_data = getattr(dev, obj_member)
        # Truncate incoming fact to the database column length
        maxlen = Device.__dict__[obj_member].property.columns[0].type.length
        fact_data = facts[dict_key][:maxlen]
        if fact_data and obj_data != fact_data:
            diff[obj_member] = {'old': obj_data, 'new': fact_data}
            setattr(dev, obj_member, fact_data)
    return diff
def test_add_dist_device(self):
    """Insert a hardcoded DIST test device into the database."""
    with sqla_session() as session:
        #TODO: get params from testdata.yml
        new_device = Device()
        new_device.ztp_mac = '08002708a8be'
        new_device.hostname = 'eosdist'
        new_device.platform = 'eos'
        new_device.management_ip = IPv4Address('10.0.1.22')
        new_device.state = DeviceState.MANAGED
        new_device.device_type = DeviceType.DIST
        # NOTE(review): Session.add() returns None, so this prints None —
        # confirm whether the print was meant to show something else
        result = session.add(new_device)
        pprint.pprint(result)
def post(self):
    """Schedule a job to synchronize devices.

    Exactly one target selector is required in the POST body:
    'hostname' (single managed device), 'device_type' (all devices of
    a type) or 'all': true. Optional: 'dry_run' (defaults to true),
    'force'.
    """
    json_data = request.get_json()
    kwargs: dict = {}
    if 'hostname' in json_data:
        hostname = str(json_data['hostname'])
        if not Device.valid_hostname(hostname):
            return empty_result(
                status='error',
                data=f"Hostname '{hostname}' is not a valid hostname"), 400
        with sqla_session() as session:
            dev: Device = session.query(Device).\
                filter(Device.hostname == hostname).one_or_none()
            if not dev or dev.state != DeviceState.MANAGED:
                return empty_result(
                    status='error',
                    data=f"Hostname '{hostname}' not found or is not a managed device"
                ), 400
        kwargs['hostname'] = hostname
        what = hostname
    elif 'device_type' in json_data:
        if DeviceType.has_name(str(json_data['device_type']).upper()):
            kwargs['device_type'] = str(json_data['device_type']).upper()
        else:
            return empty_result(
                status='error',
                data=f"Invalid device type '{json_data['device_type']}' specified"
            ), 400
        what = f"{json_data['device_type']} devices"
    elif 'all' in json_data and isinstance(json_data['all'], bool) and json_data['all']:
        what = "all devices"
    else:
        return empty_result(
            status='error',
            data=f"No devices to synchronize was specified"), 400
    # dry_run defaults to True; only an explicit boolean false disables it
    if 'dry_run' in json_data and isinstance(json_data['dry_run'], bool) \
            and not json_data['dry_run']:
        kwargs['dry_run'] = False
    if 'force' in json_data and isinstance(json_data['force'], bool):
        kwargs['force'] = json_data['force']
    scheduler = Scheduler()
    # Record who scheduled the job, consistent with the other job-scheduling
    # endpoints (restore/apply-config/update-facts all pass scheduled_by)
    job_id = scheduler.add_onetime_job(
        'cnaas_nms.confpush.sync_devices:sync_devices', when=1,
        scheduled_by=get_jwt_identity(),
        kwargs=kwargs)
    res = empty_result(data=f"Scheduled job to synchronize {what}")
    res['job_id'] = job_id
    return res
def post(self):
    """ Add a new linknet """
    json_data = request.get_json()
    data = {}
    errors = []
    if 'device_a' in json_data:
        if not Device.valid_hostname(json_data['device_a']):
            errors.append("Invalid hostname specified for device_a")
    else:
        # Message now names the actual required field (was "hostname_a")
        errors.append("Required field device_a not found")
    if 'device_b' in json_data:
        if not Device.valid_hostname(json_data['device_b']):
            errors.append("Invalid hostname specified for device_b")
    else:
        # Message now names the actual required field (was "hostname_b")
        errors.append("Required field device_b not found")
    if 'device_a_port' not in json_data:
        errors.append("Required field device_a_port not found")
    if 'device_b_port' not in json_data:
        errors.append("Required field device_b_port not found")
    if errors:
        return empty_result(status='error', data=errors), 400
    with sqla_session() as session:
        try:
            # Allocate a free /31-style infra prefix for the new link
            new_prefix = find_free_infra_linknet(session)
            new_linknet = Linknet.create_linknet(
                session, json_data['device_a'], json_data['device_a_port'],
                json_data['device_b'], json_data['device_b_port'], new_prefix)
            session.add(new_linknet)
            session.commit()
            data = new_linknet.as_dict()
        except Exception as e:
            session.rollback()
            return empty_result(status='error', data=str(e)), 500
    return empty_result(status='success', data=data), 201
def get_mlag_vars(session, dev: Device) -> dict:
    """Return MLAG-related template variables for a device."""
    result = {
        'mlag_peer': False,
        'mlag_peer_hostname': None,
        'mlag_peer_low': None
    }
    peer: Device = dev.get_mlag_peer(session)
    if not peer:
        # Not part of an MLAG pair; all defaults apply
        return result
    result['mlag_peer'] = True
    result['mlag_peer_hostname'] = peer.hostname
    # The device with the lowest id is designated the "low" MLAG peer
    result['mlag_peer_low'] = dev.id < peer.id
    return result
def post(self, device_id: int):
    """Initialize a discovered device as a new access switch."""
    if not isinstance(device_id, int):
        return empty_result(status='error', data="'device_id' must be an integer"), 400
    json_data = request.get_json()
    if 'hostname' not in json_data:
        return empty_result(
            status='error', data="POST data must include new 'hostname'"), 400
    else:
        if not Device.valid_hostname(json_data['hostname']):
            return empty_result(status='error',
                                data='Provided hostname is not valid'), 400
        else:
            new_hostname = json_data['hostname']
    if 'device_type' not in json_data:
        return empty_result(
            status='error', data="POST data must include 'device_type'"), 400
    else:
        try:
            device_type = str(json_data['device_type']).upper()
        # Catch Exception rather than a bare except (consistent with
        # arg_check's equivalent validation)
        except Exception:
            return empty_result(status='error',
                                data="'device_type' must be a string"), 400
        if not DeviceType.has_name(device_type):
            return empty_result(status='error',
                                data="Invalid 'device_type' provided"), 400
    if device_type == DeviceType.ACCESS.name:
        scheduler = Scheduler()
        job_id = scheduler.add_onetime_job(
            'cnaas_nms.confpush.init_device:init_access_device_step1',
            when=1,
            kwargs={
                'device_id': device_id,
                'new_hostname': new_hostname
            })
        res = empty_result(
            data=f"Scheduled job to initialize device_id { device_id }")
        res['job_id'] = job_id
        return res
    # NOTE(review): non-ACCESS device types fall through and return None —
    # confirm whether that is the intended response
def get_evpn_spines(session, settings: dict):
    """Return Device objects for all valid hostnames in settings->evpn_peers."""
    logger = get_logger()
    valid_hostnames = []
    for entry in settings['evpn_peers']:
        if 'hostname' in entry and Device.valid_hostname(entry['hostname']):
            valid_hostnames.append(entry['hostname'])
        else:
            logger.error(
                "Invalid entry specified in settings->evpn_peers, ignoring: {}"
                .format(entry))
    devices = []
    for hostname in valid_hostnames:
        # Silently skip hostnames not present in the device database
        found = session.query(Device).filter(
            Device.hostname == hostname).one_or_none()
        if found:
            devices.append(found)
    return devices
def post(self):
    """ Start update facts of device(s) """
    json_data = request.get_json()
    kwargs: dict = {}
    total_count: Optional[int] = None
    if 'hostname' in json_data:
        hostname = str(json_data['hostname'])
        if not Device.valid_hostname(hostname):
            return empty_result(
                status='error',
                data=f"Hostname '{hostname}' is not a valid hostname"), 400
        with sqla_session() as session:
            dev: Device = session.query(Device). \
                filter(Device.hostname == hostname).one_or_none()
            # Facts may be updated for both MANAGED and UNMANAGED devices
            if not dev or (dev.state != DeviceState.MANAGED and dev.state != DeviceState.UNMANAGED):
                return empty_result(
                    status='error',
                    data=f"Hostname '{hostname}' not found or is in invalid state"
                ), 400
        kwargs['hostname'] = hostname
        total_count = 1
    else:
        return empty_result(
            status='error',
            data="No target to be updated was specified"), 400
    scheduler = Scheduler()
    job_id = scheduler.add_onetime_job(
        'cnaas_nms.confpush.update:update_facts', when=1,
        scheduled_by=get_jwt_identity(),
        kwargs=kwargs)
    res = empty_result(
        data=f"Scheduled job to update facts for {hostname}")
    res['job_id'] = job_id
    # Build the response manually to attach the X-Total-Count header
    resp = make_response(json.dumps(res), 200)
    if total_count:
        resp.headers['X-Total-Count'] = total_count
    resp.headers['Content-Type'] = "application/json"
    return resp
def get_evpn_peers(session, settings: dict):
    """Return Device objects for settings->evpn_peers, falling back to all CORE devices."""
    logger = get_logger()
    valid_hostnames = []
    for entry in settings['evpn_peers']:
        if 'hostname' in entry and Device.valid_hostname(entry['hostname']):
            valid_hostnames.append(entry['hostname'])
        else:
            logger.error("Invalid entry specified in settings->evpn_peers, ignoring: {}".format(entry))
    ret = []
    for hostname in valid_hostnames:
        found = session.query(Device).filter(Device.hostname == hostname).one_or_none()
        if found:
            ret.append(found)
    # If no evpn_peers were specified return a list of all CORE devices instead
    if not ret:
        ret = list(session.query(Device).filter(Device.device_type == DeviceType.CORE).all())
    return ret
def get(self, hostname: str):
    """Get stored configuration from a previous job for a device."""
    args = request.args
    result = empty_result()
    result['data'] = {'config': None}
    if not Device.valid_hostname(hostname):
        return empty_result(status='error', data=f"Invalid hostname specified"), 400
    kwargs = {}
    # job_id, previous and before are alternative selectors; the elif
    # chain means job_id takes precedence, then previous, then before
    if 'job_id' in args:
        try:
            kwargs['job_id'] = int(args['job_id'])
        except Exception:
            return empty_result('error', "job_id must be an integer"), 400
    elif 'previous' in args:
        try:
            kwargs['previous'] = int(args['previous'])
        except Exception:
            return empty_result('error', "previous must be an integer"), 400
    elif 'before' in args:
        try:
            kwargs['before'] = datetime.datetime.fromisoformat(
                args['before'])
        except Exception:
            return empty_result(
                'error', "before must be a valid ISO format date time string"), 400
    with sqla_session() as session:
        try:
            result['data'] = Job.get_previous_config(
                session, hostname, **kwargs)
        except JobNotFoundError as e:
            return empty_result('error', str(e)), 404
        except InvalidJobError as e:
            return empty_result('error', str(e)), 500
        except Exception as e:
            return empty_result('error', "Unhandled exception: {}".format(e)), 500
    return result
def get_all_mgmtdomains(session, hostname: str) -> List[Mgmtdomain]:
    """ Get all mgmtdomains for a specific distribution switch.

    Args:
        session: sqla session
        hostname: hostname of distribution switch
    Raises:
        ValueError: on invalid hostname etc
    """
    if not Device.valid_hostname(hostname):
        raise ValueError(f"Argument {hostname} is not a valid hostname")
    try:
        dev = session.query(Device).filter(Device.hostname == hostname).one()
    except NoResultFound:
        raise ValueError(f"hostname {hostname} not found in device database")
    # Match domains where the device appears on either side of the pair
    member_filter = (Mgmtdomain.device_a == dev) | (Mgmtdomain.device_b == dev)
    return session.query(Mgmtdomain).filter(member_filter).all()
def post(self, hostname: str):
    """Apply exact specified configuration to device without using templates"""
    json_data = request.get_json()
    apply_kwargs = {'hostname': hostname}
    # Live (non-dry) runs must be explicitly enabled in the API config
    allow_live_run = get_apidata()['allow_apply_config_liverun']
    if not Device.valid_hostname(hostname):
        return empty_result(
            status='error',
            data=f"Invalid hostname specified"
        ), 400
    if 'full_config' not in json_data:
        return empty_result('error', "full_config must be specified"), 400
    # dry_run defaults to True; only an explicit boolean false disables it
    if 'dry_run' in json_data and isinstance(json_data['dry_run'], bool) \
            and not json_data['dry_run']:
        if allow_live_run:
            apply_kwargs['dry_run'] = False
        else:
            return empty_result('error', "Apply config live_run is not allowed"), 400
    else:
        apply_kwargs['dry_run'] = True
    apply_kwargs['config'] = json_data['full_config']
    scheduler = Scheduler()
    job_id = scheduler.add_onetime_job(
        'cnaas_nms.confpush.sync_devices:apply_config',
        when=1,
        scheduled_by=get_jwt_identity(),
        kwargs=apply_kwargs,
    )
    res = empty_result(data=f"Scheduled job to apply config {hostname}")
    res['job_id'] = job_id
    return res, 200
def get(self):
    """ Get all devices """
    # The previous `result = empty_result()` local was never used; removed
    data = {'devices': Device.device_get()}
    return empty_result(status='success', data=data), 200
def put(self, device_id):
    """Modify an existing device."""
    json_data = request.get_json()
    errors = Device.device_update(device_id, **json_data)
    if errors is None:
        return empty_result(status='success'), 200
    # device_update reported one or more problems
    return empty_result(status='error', data=errors), 404
def put(self, hostname):
    """Take a map of interfaces and associated values to update.
    Example:
        {"interfaces": {"Ethernet1": {"configtype": "ACCESS_AUTO"}}}
    """
    json_data = request.get_json()
    data = {}
    errors = []
    device_settings = None

    with sqla_session() as session:
        dev: Device = session.query(Device).filter(Device.hostname == hostname).one_or_none()
        if not dev:
            return empty_result('error', "Device not found"), 404

        updated = False
        if 'interfaces' in json_data and isinstance(json_data['interfaces'], dict):
            for if_name, if_dict in json_data['interfaces'].items():
                if not isinstance(if_dict, dict):
                    errors.append("Each interface must have a dict with data to update")
                    continue
                intf: Interface = session.query(Interface).filter(Interface.device == dev).\
                    filter(Interface.name == if_name).one_or_none()
                if not intf:
                    errors.append(f"Interface {if_name} not found")
                    continue

                # Work on a copy of the interface data so we can detect
                # whether anything actually changed before writing back
                if intf.data and isinstance(intf.data, dict):
                    intfdata_original = dict(intf.data)
                    intfdata = dict(intf.data)
                else:
                    intfdata_original = {}
                    intfdata = {}

                if 'configtype' in if_dict:
                    configtype = if_dict['configtype'].upper()
                    if InterfaceConfigType.has_name(configtype):
                        if intf.configtype != InterfaceConfigType[configtype]:
                            intf.configtype = InterfaceConfigType[configtype]
                            updated = True
                            data[if_name] = {'configtype': configtype}
                    else:
                        errors.append(f"Invalid configtype received: {configtype}")

                if 'data' in if_dict:
                    # TODO: maybe this validation should be done via
                    # pydantic if it gets more complex
                    # Settings are fetched lazily, only once per request
                    if not device_settings:
                        device_settings, _ = get_settings(hostname, dev.device_type)

                    if 'vxlan' in if_dict['data']:
                        if if_dict['data']['vxlan'] in device_settings['vxlans']:
                            intfdata['vxlan'] = if_dict['data']['vxlan']
                        else:
                            errors.append("Specified VXLAN {} is not present in {}".format(
                                if_dict['data']['vxlan'], hostname
                            ))
                    if 'untagged_vlan' in if_dict['data']:
                        # VLAN names are validated by resolving them to an id
                        vlan_id = resolve_vlanid(if_dict['data']['untagged_vlan'],
                                                 device_settings['vxlans'])
                        if vlan_id:
                            intfdata['untagged_vlan'] = if_dict['data']['untagged_vlan']
                        else:
                            errors.append("Specified VLAN name {} is not present in {}".format(
                                if_dict['data']['untagged_vlan'], hostname
                            ))
                    if 'tagged_vlan_list' in if_dict['data']:
                        if isinstance(if_dict['data']['tagged_vlan_list'], list):
                            vlan_id_list = resolve_vlanid_list(if_dict['data']['tagged_vlan_list'],
                                                              device_settings['vxlans'])
                            # All names must resolve, otherwise reject the whole list
                            if len(vlan_id_list) == len(if_dict['data']['tagged_vlan_list']):
                                intfdata['tagged_vlan_list'] = if_dict['data']['tagged_vlan_list']
                            else:
                                errors.append("Some VLAN names {} are not present in {}".format(
                                    ", ".join(if_dict['data']['tagged_vlan_list']), hostname
                                ))
                        else:
                            errors.append("tagged_vlan_list should be of type list, found {}".format(
                                type(if_dict['data']['tagged_vlan_list'])
                            ))
                    if 'neighbor' in if_dict['data']:
                        if isinstance(if_dict['data']['neighbor'], str) and \
                                Device.valid_hostname(if_dict['data']['neighbor']):
                            intfdata['neighbor'] = if_dict['data']['neighbor']
                        else:
                            errors.append("Neighbor must be valid hostname, got: {}".format(
                                if_dict['data']['neighbor']))
                    if 'description' in if_dict['data']:
                        if isinstance(if_dict['data']['description'], str) and \
                                len(if_dict['data']['description']) <= 64:
                            if if_dict['data']['description']:
                                intfdata['description'] = if_dict['data']['description']
                            elif 'description' in intfdata:
                                # Empty string clears an existing description
                                del intfdata['description']
                        elif if_dict['data']['description'] is None:
                            # Explicit null also clears an existing description
                            if 'description' in intfdata:
                                del intfdata['description']
                        else:
                            errors.append(
                                "Description must be a string of 0-64 characters for: {}".
                                format(if_dict['data']['description']))
                    if 'enabled' in if_dict['data']:
                        if type(if_dict['data']['enabled']) == bool:
                            intfdata['enabled'] = if_dict['data']['enabled']
                        else:
                            errors.append(
                                "Enabled must be a bool, true or false, got: {}".
                                format(if_dict['data']['enabled']))

                # Persist only if the data dict actually changed
                if intfdata != intfdata_original:
                    intf.data = intfdata
                    updated = True
                    if if_name in data:
                        data[if_name]['data'] = intfdata
                    else:
                        data[if_name] = {'data': intfdata}

        # Any applied change marks the device as needing a sync
        if updated:
            dev.synchronized = False

    if errors:
        if data:
            ret = {'errors': errors, 'updated': data}
        else:
            ret = {'errors': errors}
        return empty_result(status='error', data=ret), 400
    else:
        return empty_result(status='success', data={'updated': data})
# Demo/bootstrap script: create a default Site and a test Device row.
t = Site()
t.description = 'default'
from sqlalchemy.orm import sessionmaker
Session = sessionmaker(bind=engine)
session = Session()
session.add(t)
print(session.new)  # show pending (uncommitted) objects
session.commit()
td = Device()
td.description = 'Test device!'
td.hostname = 'testdevice'
td.management_ip = '1.2.3.4'
td.platform = 'eos'
td.site = t  # link the device to the site created above
td.state = DeviceState.UNKNOWN
td.device_type = DeviceType.UNKNOWN
session.add(td)
print(session.new)
session.commit()
except Exception as e: print(str(e)) sys.exit(2) with sqla_session() as session: db_entry: Device = session.query(Device).filter( Device.ztp_mac == ztp_mac).first() if db_entry: if db_entry.state == DeviceState.DHCP_BOOT: db_entry.state = DeviceState.DISCOVERED db_entry.dhcp_ip = dhcp_ip logger.info( "New device booted via DHCP to state DISCOVERED: {}". format(ztp_mac)) else: logger.error( "New device booted via DHCP in unhandled state {}: {}". format(db_entry.state, ztp_mac)) else: # TODO: look for entries with same dhcp_ip in DB and set them to null (they are stale) new_device = Device() new_device.ztp_mac = ztp_mac new_device.dhcp_ip = dhcp_ip new_device.hostname = f'mac-{ztp_mac}' new_device.platform = platform new_device.state = DeviceState.DHCP_BOOT new_device.device_type = DeviceType.UNKNOWN session.add(new_device) logger.info( "New device booted via DHCP to state DHCP_BOOT: {}".format( ztp_mac))
def post(self):
    """Add a new management domain for a pair of distribution switches."""
    json_data = request.get_json()
    data = {}
    errors = []
    with sqla_session() as session:
        if 'device_a' in json_data:
            hostname_a = str(json_data['device_a'])
            if not Device.valid_hostname(hostname_a):
                errors.append(
                    f"Invalid hostname for device_a: {hostname_a}")
            else:
                device_a = session.query(Device).\
                    filter(Device.hostname == hostname_a).one_or_none()
                if not device_a:
                    errors.append(
                        f"Device with hostname {hostname_a} not found")
                else:
                    data['device_a'] = device_a
        if 'device_b' in json_data:
            hostname_b = str(json_data['device_b'])
            if not Device.valid_hostname(hostname_b):
                errors.append(
                    f"Invalid hostname for device_b: {hostname_b}")
            else:
                device_b = session.query(Device).\
                    filter(Device.hostname == hostname_b).one_or_none()
                if not device_b:
                    errors.append(
                        f"Device with hostname {hostname_b} not found")
                else:
                    data['device_b'] = device_b
        if 'vlan' in json_data:
            try:
                vlan_id_int = int(json_data['vlan'])
            # Narrow exception types instead of a bare except
            except (ValueError, TypeError):
                errors.append('Invalid VLAN received.')
            else:
                data['vlan'] = vlan_id_int
        if 'ipv4_gw' in json_data:
            try:
                addr = IPv4Interface(json_data['ipv4_gw'])
                prefix_len = int(addr.network.prefixlen)
            # IPv4Interface raises ValueError subclasses on bad input
            except (ValueError, TypeError):
                errors.append(('Invalid ipv4_gw received. '
                               'Must be correct IPv4 address with mask'))
            else:
                if 16 <= prefix_len <= 31:
                    data['ipv4_gw'] = str(addr)
                else:
                    errors.append(
                        "Bad prefix length for management network: {}".
                        format(prefix_len))
        required_keys = ['device_a', 'device_b', 'vlan', 'ipv4_gw']
        if all([key in data for key in required_keys]):
            new_mgmtd = Mgmtdomain()
            new_mgmtd.device_a = data['device_a']
            new_mgmtd.device_b = data['device_b']
            new_mgmtd.ipv4_gw = data['ipv4_gw']
            new_mgmtd.vlan = data['vlan']
            result = session.add(new_mgmtd)
            # NOTE(review): Session.add() returns None, which is then passed
            # as the first argument of empty_result — verify the intended
            # response payload/status for successful creation
            return empty_result(result, 200)
        else:
            errors.append("Not all required inputs were found: {}".
                          format(', '.join(required_keys)))
            return empty_result('error', errors), 400
def populate_device_vars(session, dev: Device,
                         ztp_hostname: Optional[str] = None,
                         ztp_devtype: Optional[DeviceType] = None):
    """Build the template variable dict for one device.

    Collects per-device data (model, management/infra IPs, interfaces,
    BGP peers, management domains) according to the device type, merges
    it with repository settings and TEMPLATE_SECRET_* environment
    variables, and returns the combined dict.

    Args:
        session: SQLAlchemy session used for all DB lookups.
        dev: Device to populate variables for.
        ztp_hostname: If set, generate variables for this (future)
            hostname instead of dev.hostname (ZTP pre-provisioning;
            management-IP/mgmtdomain lookups are skipped).
        ztp_devtype: Override device type when dev is still UNKNOWN.

    Returns:
        Dict of template variables; device variables override settings,
        TEMPLATE_SECRET_* overrides both.

    Raises:
        Exception: Device type is UNKNOWN with no override, management
            IP is missing, or no matching management domain is found.
        ValueError: dev.platform is not a string.
    """
    logger = get_logger()
    device_variables = {
        'device_model': dev.model,
        'device_os_version': dev.os_version
    }

    if ztp_hostname:
        hostname: str = ztp_hostname
    else:
        hostname: str = dev.hostname

    if ztp_devtype:
        devtype: DeviceType = ztp_devtype
    elif dev.device_type != DeviceType.UNKNOWN:
        devtype: DeviceType = dev.device_type
    else:
        raise Exception("Can't populate device vars for device type UNKNOWN")

    mgmt_ip = dev.management_ip
    if not ztp_hostname:
        if not mgmt_ip:
            raise Exception("Could not find management IP for device {}".format(hostname))
        else:
            device_variables['mgmt_ip'] = str(mgmt_ip)

    # Platform is only validated here; templates presumably rely on it
    # being a string elsewhere.
    if isinstance(dev.platform, str):
        platform: str = dev.platform
    else:
        raise ValueError("Unknown platform: {}".format(dev.platform))

    settings, settings_origin = get_settings(hostname, devtype, dev.model)

    if devtype == DeviceType.ACCESS:
        if ztp_hostname:
            # During ZTP the device has no management domain yet.
            access_device_variables = {
                'interfaces': []
            }
        else:
            mgmtdomain = cnaas_nms.db.helper.find_mgmtdomain_by_ip(session, dev.management_ip)
            if not mgmtdomain:
                raise Exception(
                    "Could not find appropriate management domain for management_ip: {}".
                    format(dev.management_ip))
            mgmt_gw_ipif = IPv4Interface(mgmtdomain.ipv4_gw)
            access_device_variables = {
                'mgmt_vlan_id': mgmtdomain.vlan,
                'mgmt_gw': str(mgmt_gw_ipif.ip),
                'mgmt_ipif': str(IPv4Interface('{}/{}'.format(
                    mgmt_ip, mgmt_gw_ipif.network.prefixlen))),
                'mgmt_ip': str(mgmt_ip),
                'mgmt_prefixlen': int(mgmt_gw_ipif.network.prefixlen),
                'interfaces': []
            }

        intfs = session.query(Interface).filter(Interface.device == dev).all()
        intf: Interface
        for intf in intfs:
            untagged_vlan = None
            tagged_vlan_list = []
            intfdata = None
            # Unparseable interface names get index 0.
            try:
                ifindexnum: int = Interface.interface_index_num(intf.name)
            except ValueError:
                ifindexnum: int = 0
            if intf.data:
                if 'untagged_vlan' in intf.data:
                    untagged_vlan = resolve_vlanid(intf.data['untagged_vlan'],
                                                   settings['vxlans'])
                if 'tagged_vlan_list' in intf.data:
                    tagged_vlan_list = resolve_vlanid_list(intf.data['tagged_vlan_list'],
                                                           settings['vxlans'])
                intfdata = dict(intf.data)
            access_device_variables['interfaces'].append({
                'name': intf.name,
                'ifclass': intf.configtype.name,
                'untagged_vlan': untagged_vlan,
                'tagged_vlan_list': tagged_vlan_list,
                'data': intfdata,
                'indexnum': ifindexnum
            })
        mlag_vars = get_mlag_vars(session, dev)
        device_variables = {**device_variables,
                            **access_device_variables,
                            **mlag_vars}
    elif devtype == DeviceType.DIST or devtype == DeviceType.CORE:
        infra_ip = dev.infra_ip
        asn = generate_asn(infra_ip)
        fabric_device_variables = {
            'interfaces': [],
            'bgp_ipv4_peers': [],
            'bgp_evpn_peers': [],
            'mgmtdomains': [],
            'asn': asn
        }
        if mgmt_ip and infra_ip:
            mgmt_device_variables = {
                'mgmt_ipif': str(IPv4Interface('{}/32'.format(mgmt_ip))),
                'mgmt_prefixlen': 32,
                'infra_ipif': str(IPv4Interface('{}/32'.format(infra_ip))),
                'infra_ip': str(infra_ip),
            }
            fabric_device_variables = {**fabric_device_variables, **mgmt_device_variables}
        # find fabric neighbors
        fabric_interfaces = {}
        for neighbor_d in dev.get_neighbors(session):
            if neighbor_d.device_type == DeviceType.DIST or \
                    neighbor_d.device_type == DeviceType.CORE:
                # TODO: support multiple links to the same neighbor?
                local_if = dev.get_neighbor_local_ifname(session, neighbor_d)
                local_ipif = dev.get_neighbor_local_ipif(session, neighbor_d)
                neighbor_ip = dev.get_neighbor_ip(session, neighbor_d)
                if local_if:
                    fabric_interfaces[local_if] = {
                        'name': local_if,
                        'ifclass': 'fabric',
                        'ipv4if': local_ipif,
                        'peer_hostname': neighbor_d.hostname,
                        'peer_infra_lo': str(neighbor_d.infra_ip),
                        'peer_ip': str(neighbor_ip),
                        'peer_asn': generate_asn(neighbor_d.infra_ip)
                    }
                    fabric_device_variables['bgp_ipv4_peers'].append({
                        'peer_hostname': neighbor_d.hostname,
                        'peer_infra_lo': str(neighbor_d.infra_ip),
                        'peer_ip': str(neighbor_ip),
                        'peer_asn': generate_asn(neighbor_d.infra_ip)
                    })
        ifname_peer_map = dev.get_linknet_localif_mapping(session)
        if 'interfaces' in settings and settings['interfaces']:
            for intf in settings['interfaces']:
                try:
                    ifindexnum: int = Interface.interface_index_num(intf['name'])
                except ValueError:
                    ifindexnum: int = 0
                if 'ifclass' not in intf:
                    continue
                if intf['ifclass'] == 'downlink':
                    data = {}
                    if intf['name'] in ifname_peer_map:
                        data['description'] = ifname_peer_map[intf['name']]
                    fabric_device_variables['interfaces'].append({
                        'name': intf['name'],
                        'ifclass': intf['ifclass'],
                        'indexnum': ifindexnum,
                        'data': data
                    })
                elif intf['ifclass'] == 'custom':
                    fabric_device_variables['interfaces'].append({
                        'name': intf['name'],
                        'ifclass': intf['ifclass'],
                        'config': intf['config'],
                        'indexnum': ifindexnum
                    })
                elif intf['ifclass'] == 'fabric':
                    if intf['name'] in fabric_interfaces:
                        # Discovered fabric neighbor on this port; consume it.
                        fabric_device_variables['interfaces'].append(
                            {**fabric_interfaces[intf['name']], **{'indexnum': ifindexnum}}
                        )
                        del fabric_interfaces[intf['name']]
                    else:
                        # No neighbor found yet (e.g. awaiting ZTP); emit
                        # a placeholder fabric interface.
                        fabric_device_variables['interfaces'].append({
                            'name': intf['name'],
                            'ifclass': intf['ifclass'],
                            'indexnum': ifindexnum,
                            'ipv4if': None,
                            'peer_hostname': 'ztp',
                            'peer_infra_lo': None,
                            'peer_ip': None,
                            'peer_asn': None
                        })
        # Any leftover discovered neighbors had no matching 'fabric'
        # ifclass entry in settings. logger.warn is a deprecated alias,
        # use warning().
        for local_if, data in fabric_interfaces.items():
            logger.warning(f"Interface {local_if} on device {hostname} not "
                           "configured as linknet because of wrong ifclass")
        if not ztp_hostname:
            for mgmtdom in cnaas_nms.db.helper.get_all_mgmtdomains(session, hostname):
                fabric_device_variables['mgmtdomains'].append({
                    'id': mgmtdom.id,
                    'ipv4_gw': mgmtdom.ipv4_gw,
                    'vlan': mgmtdom.vlan,
                    'description': mgmtdom.description,
                    'esi_mac': mgmtdom.esi_mac
                })
        # populate evpn peers data
        for neighbor_d in get_evpn_peers(session, settings):
            if neighbor_d.hostname == dev.hostname:
                continue
            fabric_device_variables['bgp_evpn_peers'].append({
                'peer_hostname': neighbor_d.hostname,
                'peer_infra_lo': str(neighbor_d.infra_ip),
                'peer_asn': generate_asn(neighbor_d.infra_ip)
            })
        device_variables = {**device_variables, **fabric_device_variables}

    # Add all environment variables starting with TEMPLATE_SECRET_ to
    # the list of configuration variables. The idea is to store secret
    # configuration outside of the templates repository.
    template_secrets = {}
    for env in os.environ:
        if env.startswith('TEMPLATE_SECRET_'):
            template_secrets[env] = os.environ[env]
    # Merge all dicts with variables into one, later row overrides
    # Device variables override any names from settings, for example the
    # interfaces list from settings are replaced with an interface list from
    # device variables that contains more information
    device_variables = {**settings, **device_variables, **template_secrets}
    return device_variables