def get_app():
    from cnaas_nms.scheduler.scheduler import Scheduler
    from cnaas_nms.plugins.pluginmanager import PluginManagerHandler
    from cnaas_nms.db.session import sqla_session
    from cnaas_nms.db.joblock import Joblock
    from cnaas_nms.db.job import Job
    # If running inside uwsgi, a separate "mule" will run the scheduler
    try:
        import uwsgi
        print("Running inside uwsgi")
    except (ModuleNotFoundError, ImportError):
        scheduler = Scheduler()
        scheduler.start()
    pmh = PluginManagerHandler()
    pmh.load_plugins()
    try:
        with sqla_session() as session:
            Joblock.clear_locks(session)
    except Exception as e:
        print("Unable to clear old locks from database at startup: {}".format(str(e)))
    try:
        with sqla_session() as session:
            Job.clear_jobs(session)
    except Exception as e:
        print("Unable to clear jobs with invalid states: {}".format(str(e)))
    return app.app
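
# Hedged usage note (assumption, not part of the factory above): a WSGI entrypoint
# module would typically call the factory once at import time and expose the result,
# e.g. "app = get_app()". When the "uwsgi" module cannot be imported (development
# server, tests) the scheduler is started in this process; under uwsgi it instead
# runs in the separate mule process handled by main_loop() below.
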
def get(self):
    """ List all plugins """
    try:
        pmh = PluginManagerHandler()
        plugindata = pmh.get_plugindata()
        plugin_module_names = pmh.get_plugins()
    except Exception as e:
        return empty_result('error', "Error retrieving plugins {}".format(str(e)))
    else:
        return empty_result('success', {
            'loaded_plugins': plugin_module_names,
            'plugindata': plugindata
        })
def main_loop():
    try:
        import uwsgi
    except Exception as e:
        logger.exception("Mule not running in uwsgi, exiting: {}".format(str(e)))
        print("Error, not running in uwsgi")
        return

    print("Running scheduler in uwsgi mule")
    scheduler = Scheduler()
    scheduler.start()

    pmh = PluginManagerHandler()
    pmh.load_plugins()

    try:
        with sqla_session() as session:
            Joblock.clear_locks(session)
    except Exception as e:
        logger.exception("Unable to clear old locks from database at startup: {}".format(str(e)))

    while True:
        mule_data = uwsgi.mule_get_msg()
        data: dict = json.loads(mule_data)
        if data['when'] and isinstance(data['when'], int):
            data['run_date'] = datetime.datetime.utcnow() + datetime.timedelta(seconds=data['when'])
            del data['when']
        # Everything except the scheduler-specific keys is passed on as job kwargs
        kwargs = {}
        for k, v in data.items():
            if k not in ['func', 'trigger', 'id', 'run_date']:
                kwargs[k] = v
        # Perform pre-schedule job checks
        try:
            if not pre_schedule_checks(scheduler, kwargs):
                continue
        except Exception as e:
            logger.exception("Unable to perform pre-schedule job checks: {}".format(e))
        scheduler.add_job(data['func'], trigger=data['trigger'], kwargs=kwargs,
                          id=data['id'], run_date=data['run_date'], name=data['func'])
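
# Hedged sketch (not part of the mule itself) of the assumed producer side of the loop
# above: a uwsgi worker serializes the job description to JSON and hands it to the mule
# with uwsgi.mule_msg(). The payload keys mirror what main_loop() unpacks; the dotted
# function path, job id and extra kwargs below are illustrative assumptions.
def _example_send_job_to_mule():
    import json
    import uwsgi  # only importable when running inside a uwsgi process

    job = {
        'func': 'cnaas_nms.confpush.init_device:init_device_step2',  # assumed job target
        'trigger': 'date',
        'id': '123',
        'when': 1,                # seconds from now; converted to run_date by the mule
        'scheduled_by': 'admin',  # anything not in func/trigger/id/run_date becomes kwargs
        'device_id': 42,
        'iteration': 1,
    }
    uwsgi.mule_msg(json.dumps(job))
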
def put(self):
    """ Modify plugins """
    json_data = request.get_json()
    if 'action' in json_data:
        if str(json_data['action']).upper() == 'SELFTEST':
            pmh = PluginManagerHandler()
            res = pmh.pm.hook.selftest()
            return empty_result('success', {'result': res})
        else:
            return empty_result('error', "Unknown action specified"), 400
    else:
        return empty_result('error', "No action specified"), 400
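
# Hedged client-side sketch for the two plugin endpoints above. The URL path and the
# JWT bearer-token header are assumptions about how the API is deployed; only the
# request and response shapes follow from the code (loaded_plugins/plugindata on GET,
# {"action": "selftest"} on PUT).
def _example_plugin_api_calls(base_url: str, token: str):
    import requests

    headers = {'Authorization': 'Bearer {}'.format(token)}
    # List loaded plugins and their plugindata
    r = requests.get('{}/plugins'.format(base_url), headers=headers)
    print(r.json())
    # Trigger the plugin selftest hook
    r = requests.put('{}/plugins'.format(base_url), headers=headers,
                     json={'action': 'selftest'})
    print(r.json())
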
def init_device_step2(device_id: int, iteration: int = -1,
                      job_id: Optional[str] = None,
                      scheduled_by: Optional[str] = None) -> NornirJobResult:
    logger = get_logger()
    # step4+ in apjob: if success, update management ip and device state, trigger external stuff?
    with sqla_session() as session:
        dev = session.query(Device).filter(Device.id == device_id).one()
        if dev.state != DeviceState.INIT:
            logger.error("Device with ID {} got to init step2 but is in incorrect state: {}".format(
                device_id, dev.state.name))
            raise DeviceStateException("Device must be in state INIT to continue init step 2")
        hostname = dev.hostname
        devtype: DeviceType = dev.device_type

    nr = cnaas_nms.confpush.nornir_helper.cnaas_init()
    nr_filtered = nr.filter(name=hostname)

    nrresult = nr_filtered.run(task=napalm_get, getters=["facts"])

    if nrresult.failed:
        # Device not reachable yet, schedule another attempt and return
        next_job_id = schedule_init_device_step2(device_id, iteration, scheduled_by)
        if next_job_id:
            return NornirJobResult(nrresult=nrresult, next_job_id=next_job_id)
        else:
            return NornirJobResult(nrresult=nrresult)
    try:
        facts = nrresult[hostname][0].result['facts']
        found_hostname = facts['hostname']
    except Exception:
        raise InitError("Could not log in to device during init step 2")
    if hostname != found_hostname:
        raise InitError("Newly initialized device presents wrong hostname")

    with sqla_session() as session:
        dev: Device = session.query(Device).filter(Device.id == device_id).one()
        dev.state = DeviceState.MANAGED
        dev.synchronized = False
        set_facts(dev, facts)
        management_ip = dev.management_ip
        dev.dhcp_ip = None

    # Plugin hook: new managed device
    # Send: hostname, device type, serial, platform, vendor, model, os version
    try:
        pmh = PluginManagerHandler()
        pmh.pm.hook.new_managed_device(hostname=hostname,
                                       device_type=devtype.name,
                                       serial_number=facts['serial_number'],
                                       vendor=facts['vendor'],
                                       model=facts['model'],
                                       os_version=facts['os_version'],
                                       management_ip=str(management_ip))
    except Exception as e:
        logger.exception("Error while running plugin hooks for new_managed_device: {}".format(str(e)))

    return NornirJobResult(nrresult=nrresult)
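
# Hedged sketch of the retry path above: schedule_init_device_step2() is assumed to
# re-schedule this same job with an incremented iteration counter (giving the device
# time to boot and become reachable) and to give up after some maximum number of
# attempts. The delay and retry limit below are illustrative assumptions; only the
# add_onetime_job() call pattern is taken from the surrounding code.
def _example_schedule_init_device_step2_retry(device_id: int, iteration: int,
                                              scheduled_by: str):
    max_iterations = 2  # assumed retry limit
    if 0 < iteration <= max_iterations:
        scheduler = Scheduler()
        return scheduler.add_onetime_job(
            'cnaas_nms.confpush.init_device:init_device_step2',
            when=30,  # assumed delay in seconds before the next attempt
            scheduled_by=scheduled_by,
            kwargs={'device_id': device_id, 'iteration': iteration + 1})
    return None
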
def init_fabric_device_step1(device_id: int, new_hostname: str, device_type: str,
                             neighbors: Optional[List[str]] = [],
                             job_id: Optional[str] = None,
                             scheduled_by: Optional[str] = None) -> NornirJobResult:
    """Initialize fabric (CORE/DIST) device for management by CNaaS-NMS.

    Args:
        device_id: Device to select for initialization
        new_hostname: Hostname to configure on this device
        device_type: String representing DeviceType
        neighbors: Optional list of hostnames of peer devices
        job_id: job_id provided by scheduler when adding job
        scheduled_by: Username from JWT.

    Returns:
        Nornir result object

    Raises:
        DeviceStateException
        ValueError
    """
    logger = get_logger()
    if DeviceType.has_name(device_type):
        devtype = DeviceType[device_type]
    else:
        raise ValueError("Invalid 'device_type' provided")

    if devtype not in [DeviceType.CORE, DeviceType.DIST]:
        raise ValueError("Init fabric device requires device type DIST or CORE")

    with sqla_session() as session:
        dev = pre_init_checks(session, device_id)

        # Test update of linknets using LLDP data
        linknets = update_linknets(session, dev.hostname, devtype,
                                   ztp_hostname=new_hostname, dry_run=True)

        try:
            verified_neighbors = pre_init_check_neighbors(session, dev, devtype,
                                                          linknets, neighbors)
            logger.debug("Found valid neighbors for INIT of {}: {}".format(
                new_hostname, ", ".join(verified_neighbors)))
            check_neighbor_sync(session, verified_neighbors)
        except Exception as e:
            raise e
        else:
            dev.state = DeviceState.INIT
            dev.device_type = devtype
            session.commit()

        # If neighbor check works, commit new linknets
        # This will also mark neighbors as unsynced
        linknets = update_linknets(session, dev.hostname, devtype,
                                   ztp_hostname=new_hostname, dry_run=False)
        logger.debug("New linknets for INIT of {} created: {}".format(
            new_hostname, linknets))

        # Select and reserve a new management and infra IP for the device
        ReservedIP.clean_reservations(session, device=dev)
        session.commit()

        mgmt_ip = cnaas_nms.confpush.underlay.find_free_mgmt_lo_ip(session)
        infra_ip = cnaas_nms.confpush.underlay.find_free_infra_ip(session)

        reserved_ip = ReservedIP(device=dev, ip=mgmt_ip)
        session.add(reserved_ip)
        dev.infra_ip = infra_ip
        session.commit()

        mgmt_variables = {
            'mgmt_ipif': str(IPv4Interface('{}/32'.format(mgmt_ip))),
            'mgmt_prefixlen': 32,
            'infra_ipif': str(IPv4Interface('{}/32'.format(infra_ip))),
            'infra_ip': str(infra_ip),
        }

        device_variables = populate_device_vars(session, dev, new_hostname, devtype)
        device_variables = {**device_variables, **mgmt_variables}

        # Update device state
        dev.hostname = new_hostname
        session.commit()
        hostname = dev.hostname

    nr = cnaas_nms.confpush.nornir_helper.cnaas_init()
    nr_filtered = nr.filter(name=hostname)

    # TODO: certificate

    # step2. push management config
    nrresult = nr_filtered.run(task=push_base_management,
                               device_variables=device_variables,
                               devtype=devtype,
                               job_id=job_id)

    with sqla_session() as session:
        dev = session.query(Device).filter(Device.id == device_id).one()
        dev.management_ip = mgmt_ip
        # Remove the reserved IP since it's now saved in the device database instead
        reserved_ip = session.query(ReservedIP).filter(ReservedIP.device == dev).one_or_none()
        if reserved_ip:
            session.delete(reserved_ip)

    # Plugin hook, allocated IP
    try:
        pmh = PluginManagerHandler()
        pmh.pm.hook.allocated_ipv4(vrf='mgmt', ipv4_address=str(mgmt_ip),
                                   ipv4_network=None,
                                   hostname=hostname)
    except Exception as e:
        logger.exception("Error while running plugin hooks for allocated_ipv4: {}".format(str(e)))

    # step3. resync neighbors
    scheduler = Scheduler()
    sync_nei_job_id = scheduler.add_onetime_job(
        'cnaas_nms.confpush.sync_devices:sync_devices',
        when=1,
        scheduled_by=scheduled_by,
        kwargs={'hostnames': verified_neighbors, 'dry_run': False})
    logger.info(f"Scheduled job {sync_nei_job_id} to resynchronize neighbors")

    # step4. register apscheduler job that continues steps
    scheduler = Scheduler()
    next_job_id = scheduler.add_onetime_job(
        'cnaas_nms.confpush.init_device:init_device_step2',
        when=60,
        scheduled_by=scheduled_by,
        kwargs={'device_id': device_id, 'iteration': 1})
    logger.info("Init step 2 for {} scheduled as job # {}".format(new_hostname, next_job_id))

    return NornirJobResult(nrresult=nrresult, next_job_id=next_job_id)
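
# Hedged usage sketch: the fabric init above is itself assumed to be run as a scheduled
# job (it returns a NornirJobResult and takes job_id/scheduled_by), typically queued by
# the device/ZTP API rather than called directly. The call below mirrors the
# add_onetime_job() pattern used inside the function; the device_id, hostname and
# device_type values are examples only.
def _example_schedule_fabric_init(scheduled_by: str):
    scheduler = Scheduler()
    return scheduler.add_onetime_job(
        'cnaas_nms.confpush.init_device:init_fabric_device_step1',
        when=1,
        scheduled_by=scheduled_by,
        kwargs={'device_id': 42, 'new_hostname': 'dist1', 'device_type': 'DIST'})
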
def init_access_device_step1(device_id: int, new_hostname: str,
                             mlag_peer_id: Optional[int] = None,
                             mlag_peer_new_hostname: Optional[str] = None,
                             uplink_hostnames_arg: Optional[List[str]] = [],
                             job_id: Optional[str] = None,
                             scheduled_by: Optional[str] = None) -> NornirJobResult:
    """Initialize access device for management by CNaaS-NMS.
    If a MLAG/MC-LAG pair is to be configured both mlag_peer_id and
    mlag_peer_new_hostname must be set.

    Args:
        device_id: Device to select for initialization
        new_hostname: Hostname to configure on this device
        mlag_peer_id: Device ID of MLAG peer device (optional)
        mlag_peer_new_hostname: Hostname to configure on peer device (optional)
        uplink_hostnames_arg: List of hostnames of uplink peer devices (optional)
                              Used when initializing MLAG peer device
        job_id: job_id provided by scheduler when adding job
        scheduled_by: Username from JWT.

    Returns:
        Nornir result object

    Raises:
        DeviceStateException
        ValueError
    """
    logger = get_logger()
    with sqla_session() as session:
        dev = pre_init_checks(session, device_id)

        # Update linknets using LLDP data
        update_linknets(session, dev.hostname, DeviceType.ACCESS)

        # If this is the first device in an MLAG pair
        if mlag_peer_id and mlag_peer_new_hostname:
            mlag_peer_dev = pre_init_checks(session, mlag_peer_id)
            update_linknets(session, mlag_peer_dev.hostname, DeviceType.ACCESS)
            update_interfacedb_worker(session, dev, replace=True, delete_all=False,
                                      mlag_peer_hostname=mlag_peer_dev.hostname)
            update_interfacedb_worker(session, mlag_peer_dev, replace=True, delete_all=False,
                                      mlag_peer_hostname=dev.hostname)
            uplink_hostnames = dev.get_uplink_peer_hostnames(session)
            uplink_hostnames += mlag_peer_dev.get_uplink_peer_hostnames(session)
            # Check that both devices see the correct MLAG peer
            pre_init_check_mlag(session, dev, mlag_peer_dev)
            pre_init_check_mlag(session, mlag_peer_dev, dev)
        # If this is the second device in an MLAG pair
        elif uplink_hostnames_arg:
            uplink_hostnames = uplink_hostnames_arg
        elif mlag_peer_id or mlag_peer_new_hostname:
            raise ValueError("mlag_peer_id and mlag_peer_new_hostname must be specified together")
        # If this device is not part of an MLAG pair
        else:
            update_interfacedb_worker(session, dev, replace=True, delete_all=False)
            uplink_hostnames = dev.get_uplink_peer_hostnames(session)

        # TODO: check compatibility, same dist pair and same ports on dists
        mgmtdomain = cnaas_nms.db.helper.find_mgmtdomain(session, uplink_hostnames)
        if not mgmtdomain:
            raise Exception(
                "Could not find appropriate management domain for uplink peer devices: {}".format(
                    uplink_hostnames))

        # Select a new management IP for the device
        ReservedIP.clean_reservations(session, device=dev)
        session.commit()
        mgmt_ip = mgmtdomain.find_free_mgmt_ip(session)
        if not mgmt_ip:
            raise Exception("Could not find free management IP for management domain {}/{}".format(
                mgmtdomain.id, mgmtdomain.description))
        reserved_ip = ReservedIP(device=dev, ip=mgmt_ip)
        session.add(reserved_ip)

        # Populate variables for template rendering
        mgmt_gw_ipif = IPv4Interface(mgmtdomain.ipv4_gw)
        mgmt_variables = {
            'mgmt_ipif': str(IPv4Interface('{}/{}'.format(mgmt_ip,
                                                          mgmt_gw_ipif.network.prefixlen))),
            'mgmt_ip': str(mgmt_ip),
            'mgmt_prefixlen': int(mgmt_gw_ipif.network.prefixlen),
            'mgmt_vlan_id': mgmtdomain.vlan,
            'mgmt_gw': mgmt_gw_ipif.ip,
        }

        device_variables = populate_device_vars(session, dev, new_hostname, DeviceType.ACCESS)
        device_variables = {**device_variables, **mgmt_variables}

        # Update device state
        dev.hostname = new_hostname
        session.commit()
        hostname = dev.hostname

    nr = cnaas_nms.confpush.nornir_helper.cnaas_init()
    nr_filtered = nr.filter(name=hostname)

    # step2. push management config
    nrresult = nr_filtered.run(task=push_base_management,
                               device_variables=device_variables,
                               devtype=DeviceType.ACCESS,
                               job_id=job_id)

    with sqla_session() as session:
        dev = session.query(Device).filter(Device.id == device_id).one()
        dev.management_ip = device_variables['mgmt_ip']
        dev.state = DeviceState.INIT
        dev.device_type = DeviceType.ACCESS
        # Remove the reserved IP since it's now saved in the device database instead
        reserved_ip = session.query(ReservedIP).filter(ReservedIP.device == dev).one_or_none()
        if reserved_ip:
            session.delete(reserved_ip)

    # Plugin hook, allocated IP
    try:
        pmh = PluginManagerHandler()
        pmh.pm.hook.allocated_ipv4(vrf='mgmt', ipv4_address=str(mgmt_ip),
                                   ipv4_network=str(mgmt_gw_ipif.network),
                                   hostname=hostname)
    except Exception as e:
        logger.exception("Error while running plugin hooks for allocated_ipv4: {}".format(str(e)))

    # step3. register apscheduler job that continues steps
    if mlag_peer_id and mlag_peer_new_hostname:
        # Account for delayed start of peer device plus mgmt timeout
        step2_delay = 30 + 60 + 30
    else:
        step2_delay = 30
    scheduler = Scheduler()
    next_job_id = scheduler.add_onetime_job(
        'cnaas_nms.confpush.init_device:init_device_step2',
        when=step2_delay,
        scheduled_by=scheduled_by,
        kwargs={'device_id': device_id, 'iteration': 1})
    logger.info("Init step 2 for {} scheduled as job # {}".format(new_hostname, next_job_id))
    if mlag_peer_id and mlag_peer_new_hostname:
        mlag_peer_job_id = scheduler.add_onetime_job(
            'cnaas_nms.confpush.init_device:init_access_device_step1',
            when=60,
            scheduled_by=scheduled_by,
            kwargs={'device_id': mlag_peer_id,
                    'new_hostname': mlag_peer_new_hostname,
                    'uplink_hostnames_arg': uplink_hostnames,
                    'scheduled_by': scheduled_by})
        logger.info("MLAG peer (id {}) init scheduled as job # {}".format(
            mlag_peer_id, mlag_peer_job_id))

    return NornirJobResult(nrresult=nrresult, next_job_id=next_job_id)
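
# Hedged usage sketch: scheduling access init for an MLAG pair. Only the first device
# is queued explicitly; as seen above, init_access_device_step1() schedules its MLAG
# peer itself once the uplinks have been verified. The module path and kwargs follow
# the calls inside the function; the device IDs and hostnames are examples only.
def _example_schedule_mlag_access_init(scheduled_by: str):
    scheduler = Scheduler()
    return scheduler.add_onetime_job(
        'cnaas_nms.confpush.init_device:init_access_device_step1',
        when=1,
        scheduled_by=scheduled_by,
        kwargs={'device_id': 10,
                'new_hostname': 'a1',
                'mlag_peer_id': 11,
                'mlag_peer_new_hostname': 'a2'})
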