def create_default_services():
    """Provision the built-in workflow services, all owned by 'admin':
    the hidden Start/End markers plus the mail/Slack/Mattermost
    notification services."""
    admin_id = fetch('User', name='admin').id
    common = {'type': 'SwissArmyKnifeService', 'creator': admin_id}
    definitions = [
        {**common, 'name': 'Start',
         'description': 'Start point of a workflow', 'hidden': True},
        {**common, 'name': 'End',
         'description': 'End point of a workflow', 'hidden': True},
        {**common, 'name': 'mail_feedback_notification',
         'description': 'Mail notification (service logs)'},
        {**common, 'name': 'slack_feedback_notification',
         'description': 'Slack notification (service logs)'},
        {**common, 'name': 'mattermost_feedback_notification',
         'description': 'Mattermost notification (service logs)'},
    ]
    for params in definitions:
        service_type = params.pop('type')
        factory(service_type, **params)
def create_default_workflows() -> None:
    """Build the 'Configuration Management Workflow': poll device
    configurations and push them to gitlab, wiring the edges and the
    canvas layout."""
    title = "Configuration Management Workflow"
    workflow = factory(
        "Workflow",
        name=title,
        description="Poll configuration and push to gitlab",
        use_workflow_targets=False,
        creator=fetch("User", name="admin").id,
    )
    workflow.jobs.extend([
        fetch("Service", name="poller_service"),
        fetch("Service", name="git_push_configurations"),
    ])
    # (source index, destination index, True => 'success' / False => 'failure')
    for src, dst, success in ((0, 2, True), (2, 3, True), (2, 3, False), (3, 1, True)):
        factory(
            "WorkflowEdge",
            name=f"{workflow.name} {src} -> {dst} ({success})",
            workflow=workflow.id,
            subtype="success" if success else "failure",
            source=workflow.jobs[src].id,
            destination=workflow.jobs[dst].id,
        )
    # Place the jobs on the workflow canvas (coordinates scaled by 10).
    layout = ((-30, 0), (20, 0), (0, -20), (0, 30))
    for job_index, (px, py) in enumerate(layout):
        workflow.jobs[job_index].positions[title] = px * 10, py * 10
def create_default_users():
    """Create the default 'admin' account if it does not already exist.

    Fix: guard on existence before creating. Without the guard, re-running
    the setup resets the admin password and permissions; the typed variant
    of this function elsewhere in this file already guards this way.
    """
    if not fetch('User', name='admin'):
        factory('User', **{
            'name': 'admin',
            'email': '*****@*****.**',
            'password': '******',
            'permissions': ['Admin'],
        })
def create_default_workflows():
    """Create the configuration-management workflow (configuration backup
    followed by a git push), including its success/failure edges and the
    position of each job on the canvas."""
    title = 'Configuration Management Workflow'
    workflow = factory(
        'Workflow',
        name=title,
        description='Poll configuration and push to gitlab',
        use_workflow_targets=False,
        creator=fetch('User', name='admin').id,
    )
    workflow.jobs.extend([
        fetch('Service', name='configuration_backup'),
        fetch('Service', name='git_push_configurations'),
    ])
    # (source, destination, True => 'success' edge / False => 'failure' edge)
    for src, dst, success in ((0, 2, True), (2, 3, True), (2, 3, False), (3, 1, True)):
        factory(
            'WorkflowEdge',
            name=f'{workflow.name} {src} -> {dst} ({success})',
            workflow=workflow.id,
            subtype='success' if success else 'failure',
            source=workflow.jobs[src].id,
            destination=workflow.jobs[dst].id,
        )
    # Canvas layout, scaled by 10.
    for job_index, (px, py) in enumerate(((-30, 0), (20, 0), (0, -20), (0, 30))):
        workflow.jobs[job_index].positions[title] = px * 10, py * 10
def create_default_services():
    """Create the default swiss-army-knife services: the hidden Start/End
    workflow markers and the notification services."""
    definitions = [
        {'name': 'Start', 'description': 'Start point of a workflow',
         'hidden': True},
        {'name': 'End', 'description': 'End point of a workflow',
         'hidden': True},
        {'name': 'mail_feedback_notification',
         'description': 'Mail notification (service logs)'},
        {'name': 'slack_feedback_notification',
         'description': 'Slack notification (service logs)'},
        {'name': 'mattermost_feedback_notification',
         'description': 'Mattermost notification (service logs)'},
    ]
    for params in definitions:
        factory('swiss_army_knife_service', **params)
def create_workflow_of_workflows():
    """Build 'Workflow_of_workflows', a workflow whose jobs are themselves
    workflows, used to exercise the inner-workflow system against the
    Washington device."""
    admin_id = fetch('User', name='admin').id
    device_ids = [fetch('Device', name='Washington').id]
    workflow = factory(
        'Workflow',
        name='Workflow_of_workflows',
        description='Test the inner workflow system',
        devices=device_ids,
        creator=admin_id,
        vendor='Arista',
        operating_system='eos',
    )
    workflow.jobs.extend([
        fetch('Job', name='payload_transfer_workflow'),
        fetch('Job', name='get_interfaces'),
        fetch('Job', name='Napalm_VRF_workflow'),
    ])
    for src, dst in ((0, 2), (2, 3), (3, 4), (4, 1)):
        factory(
            'WorkflowEdge',
            name=f'{workflow.name} {src} -> {dst}',
            workflow=workflow.id,
            subtype='success',
            devices=device_ids,
            source=workflow.jobs[src].id,
            destination=workflow.jobs[dst].id,
        )
    # Canvas layout, scaled by 10.
    layout = ((-30, 0), (30, 0), (0, -20), (0, 0), (0, 20))
    for job_index, (px, py) in enumerate(layout):
        workflow.jobs[job_index].positions['Workflow_of_workflows'] = px * 10, py * 10
def object_import(request: dict, file: FileStorage) -> str:
    """Import devices and links from an uploaded Excel workbook.

    Optionally wipes existing devices first ('replace') and recomputes
    pools afterwards ('update_pools'). Returns a human-readable status,
    downgraded to a partial-import message if any row fails.
    """
    if request["replace"]:
        delete_all("Device")
    result = "Topology successfully imported."
    if allowed_file(secure_filename(file.filename), {"xls", "xlsx"}):
        workbook = open_workbook(file_contents=file.read())
        for sheet_name in ("Device", "Link"):
            try:
                sheet = workbook.sheet_by_name(sheet_name)
            except XLRDError:
                continue  # workbook has no sheet for this object type
            header = sheet.row_values(0)
            for row in range(1, sheet.nrows):
                record = dict(zip(header, sheet.row_values(row)))
                try:
                    factory(sheet_name, **record).serialized
                except Exception as e:
                    info(f"{str(record)} could not be imported ({str(e)})")
                    result = "Partial import (see logs)."
        db.session.commit()
    if request["update_pools"]:
        for pool in fetch_all("Pool"):
            pool.compute_pool()
        db.session.commit()
    return result
def configure_instance_id():
    """Register the local eNMS instance, keyed by this machine's MAC
    address (uuid.getnode)."""
    factory(
        'Instance',
        name=str(getnode()),
        description='Localhost',
        ip_address='0.0.0.0',
        status='Up',
    )
def object_import(request, file):
    """Import Device and Link objects from an uploaded spreadsheet, with an
    optional pre-wipe of devices and a post-import pool recomputation.
    Returns a status string ('Partial import' if any row failed)."""
    if request['replace']:
        delete_all('Device')
    result = 'Topology successfully imported.'
    filename = secure_filename(file.filename)
    if allowed_file(filename, {'xls', 'xlsx'}):
        book = open_workbook(file_contents=file.read())
        for obj_type in ('Device', 'Link'):
            try:
                sheet = book.sheet_by_name(obj_type)
            except XLRDError:
                continue  # no sheet for this object type
            columns = sheet.row_values(0)
            for index in range(1, sheet.nrows):
                values = dict(zip(columns, sheet.row_values(index)))
                try:
                    factory(obj_type, **values).serialized
                except Exception as e:
                    info(f'{str(values)} could not be imported ({str(e)})')
                    result = 'Partial import (see logs).'
        db.session.commit()
    if request['update_pools']:
        for pool in fetch_all('Pool'):
            pool.compute_pool()
        db.session.commit()
    return result
def create_example_services():
    """Create the example/test services: BGP configuration, file transfer,
    log backup, database backup and configuration backup."""
    admin_id = fetch('User', name='admin').id
    definitions = [
        {
            'type': 'ConfigureBgpService',
            'name': 'napalm_configure_bgp_1',
            'description': 'Configure BGP Peering with Napalm',
            'devices': [fetch('Device', name='Washington').id],
            'creator': admin_id,
            'local_as': 100,
            'loopback': 'Lo100',
            'loopback_ip': '100.1.1.1',
            'neighbor_ip': '100.1.2.1',
            'remote_as': 200,
            'vrf_name': 'configure_BGP_test',
            'waiting_time': 0,
        },
        {
            'type': 'GenericFileTransferService',
            'name': 'test_file_transfer_service',
            'description': 'Test the file transfer service',
            'devices': [fetch('Device', name='Aserver').id],
            'creator': admin_id,
            'direction': 'get',
            'protocol': 'scp',
            'source_file': '/media/sf_VM/eNMS/tests/file_transfer/a.bin',
            'destination_file': '/media/sf_VM/eNMS/tests/file_transfer/b.bin',
            'missing_host_key_policy': True,
        },
        {
            'type': 'LogBackupService',
            'name': 'test_log_backup_service',
            'description': 'Test the log backup service',
            'devices': [fetch('Device', name='Aserver').id],
            'creator': admin_id,
            'protocol': 'scp',
            'destination_ip_address': '127.0.0.1',
            'destination_path': '/media/sf_VM/eNMS/tests/file_transfer',
            'delete_archive': True,
            'delete_folder': True,
        },
        {
            'type': 'DatabaseBackupService',
            'name': 'test_database_backup_service',
            'description': 'Test the log backup service',
            'devices': [fetch('Device', name='Aserver').id],
            'creator': admin_id,
            'protocol': 'scp',
            'destination_ip_address': '127.0.0.1',
            'destination_path': '/media/sf_VM/eNMS/tests/file_transfer',
            'delete_archive': True,
            'delete_folder': True,
        },
        {
            'type': 'ConfigurationBackupService',
            'name': 'configuration_backup',
            'description': 'Test Configuration Management',
            'pools': [fetch('Pool', name='Devices only').id],
            'configuration_command': 'show running-config',
            'multiprocessing': True,
            'creator': admin_id,
        },
    ]
    for params in definitions:
        service_type = params.pop('type')
        factory(service_type, **params)
def login():
    # Authenticate a user via one of three backends (local DB, LDAP with
    # NTLM, TACACS) on POST; render the login form on GET.
    if request.method == 'POST':
        name, password = request.form['name'], request.form['password']
        try:
            if request.form['authentication_method'] == 'Local User':
                user = fetch('User', name=name)
                # NOTE(review): password compared in plaintext against the
                # stored value — confirm whether hashing happens elsewhere.
                if user and password == user.password:
                    login_user(user)
                    return redirect(url_for('base_blueprint.dashboard'))
            elif request.form['authentication_method'] == 'LDAP Domain':
                # Bind as DOMAIN\username with NTLM; auto_bind raises on
                # bad credentials, caught by the except below.
                with Connection(
                        ldap_client,
                        user=f'{app.config["LDAP_USERDN"]}\\{name}',
                        password=password,
                        auto_bind=True,
                        authentication=NTLM) as connection:
                    connection.search(
                        app.config['LDAP_BASEDN'],
                        f'(&(objectClass=person)(samaccountname={name}))',
                        search_scope=SUBTREE,
                        get_operational_attributes=True,
                        attributes=['cn', 'memberOf', 'mail'])
                    # First matching directory entry for this account.
                    json_response = loads(
                        connection.response_to_json())['entries'][0]
                    if json_response:
                        user = {
                            'name': name,
                            'password': password,
                            'email': json_response['attributes'].get('mail', '')
                        }
                        # Membership in the configured admin group grants
                        # the 'Admin' permission on the local account.
                        if any(app.config['LDAP_ADMIN_GROUP'] in s
                               for s in json_response['attributes']['memberOf']):
                            user['permissions'] = ['Admin']
                        new_user = factory('User', **user)
                        login_user(new_user)
                        return redirect(url_for('base_blueprint.dashboard'))
            elif request.form['authentication_method'] == 'TACACS':
                if tacacs_client.authenticate(name, password).valid:
                    user = factory('User', **{
                        'name': name,
                        'password': password
                    })
                    login_user(user)
                    return redirect(url_for('base_blueprint.dashboard'))
            # No backend accepted the credentials.
            abort(403)
        except Exception as e:
            # Backend failure (unreachable server, bad bind, ...) is logged
            # and converted into a 403 rather than a 500.
            info(f'Authentication failed ({str(e)})')
            abort(403)
    # GET: show the login form, offering only the enabled backends.
    if not current_user.is_authenticated:
        login_form = LoginForm(request.form)
        authentication_methods = [('Local User', ) * 2]
        if USE_LDAP:
            authentication_methods.append(('LDAP Domain', ) * 2)
        if USE_TACACS:
            authentication_methods.append(('TACACS', ) * 2)
        login_form.authentication_method.choices = authentication_methods
        return render_template('login.html', login_form=login_form)
    return redirect(url_for('base_blueprint.dashboard'))
def cluster_monitoring(self, _):
    """Poll every registered instance's /rest/is_alive endpoint and refresh
    its database record with the returned payload."""
    parameters = get_one('Parameters')
    scheme = parameters.cluster_scan_protocol
    timeout = parameters.cluster_scan_timeout
    for peer in fetch_all('Instance'):
        response = get(f'{scheme}://{peer.ip_address}/rest/is_alive', timeout=timeout)
        factory('Instance', **response.json())
    return {'success': True}
def create_default_services() -> None:
    """Provision every built-in SwissArmyKnifeService: the hidden Start/End
    workflow markers, the notification services, cluster monitoring, the
    git push service and the hidden configuration poller."""
    admin_id = fetch("User", name="admin").id
    definitions = [
        {"name": "Start",
         "description": "Start point of a workflow", "hidden": True},
        {"name": "End",
         "description": "End point of a workflow", "hidden": True},
        {"name": "mail_feedback_notification",
         "description": "Mail notification (service logs)"},
        {"name": "slack_feedback_notification",
         "description": "Slack notification (service logs)"},
        {"name": "mattermost_feedback_notification",
         "description": "Mattermost notification (service logs)"},
        {"name": "cluster_monitoring",
         "description": "Monitor eNMS cluster"},
        {"name": "git_push_configurations",
         "description": "Push configurations to Gitlab"},
        {"name": "poller_service",
         "description": "Configuration Management Poller", "hidden": True},
    ]
    for params in definitions:
        factory("SwissArmyKnifeService", creator=admin_id, **params)
def create_default_services():
    """Create the default SwissArmyKnifeService instances: Start/End
    markers, notification services, cluster monitoring, git push and the
    configuration poller."""
    admin_id = fetch('User', name='admin').id
    for name, description, hidden in (
        ('Start', 'Start point of a workflow', True),
        ('End', 'End point of a workflow', True),
        ('mail_feedback_notification', 'Mail notification (service logs)', False),
        ('slack_feedback_notification', 'Slack notification (service logs)', False),
        ('mattermost_feedback_notification',
         'Mattermost notification (service logs)', False),
        ('cluster_monitoring', 'Monitor eNMS cluster', False),
        ('git_push_configurations', 'Push configurations to Gitlab', False),
        ('poller_service', 'Configuration Management Poller', True),
    ):
        params = {'name': name, 'description': description, 'creator': admin_id}
        # The original definitions only pass 'hidden' for the hidden services.
        if hidden:
            params['hidden'] = True
        factory('SwissArmyKnifeService', **params)
def create_napalm_workflow() -> None:
    """Build 'Napalm_VRF_workflow': create a VRF with Napalm, check it with
    Netmiko, delete it, then check it is gone.

    Fix: dropped the extraneous ``f`` prefixes on the two job-name literals
    — they contain no placeholders (flake8 F541).
    """
    admin = fetch("User", name="admin").id
    devices = [
        fetch("Device", name="Washington").id,
        fetch("Device", name="Austin").id,
    ]
    services = [
        factory(
            "NapalmConfigurationService",
            **{
                "name": "napalm_create_vrf_test",
                "description": 'Create a VRF "test" with Napalm',
                "waiting_time": 0,
                "devices": devices,
                "creator": admin,
                "driver": "eos",
                "vendor": "Arista",
                "operating_system": "eos",
                "content_type": "simple",
                "action": "load_merge_candidate",
                "content": "vrf definition test\n",
            },
        )
    ]
    services.extend([
        fetch("Job", name="netmiko_check_vrf_test"),
        fetch("Job", name="netmiko_delete_vrf_test"),
        fetch("Job", name="netmiko_check_no_vrf_test"),
    ])
    workflow = factory(
        "Workflow",
        **{
            "name": "Napalm_VRF_workflow",
            "description": "Create and delete a VRF with Napalm",
            "creator": admin,
            "devices": devices,
            "vendor": "Arista",
            "operating_system": "eos",
        },
    )
    workflow.jobs.extend(services)
    # Chain job 0 -> services (2..5) -> job 1; indices 0 and 1 appear to be
    # the workflow's default Start/End jobs — TODO confirm.
    edges = [(0, 2), (2, 3), (3, 4), (4, 5), (5, 1)]
    for x, y in edges:
        factory(
            "WorkflowEdge",
            **{
                "name": f"{workflow.name} {x} -> {y}",
                "workflow": workflow.id,
                "subtype": "success",
                "source": workflow.jobs[x].id,
                "destination": workflow.jobs[y].id,
            },
        )
    # Canvas layout, scaled by 10.
    positions = [(-20, 0), (20, 0), (0, -15), (0, -5), (0, 5), (0, 15)]
    for index, (x, y) in enumerate(positions):
        workflow.jobs[index].positions["Napalm_VRF_workflow"] = x * 10, y * 10
def configure_instance_id() -> None:
    """Register this host as an eNMS instance named after its MAC address."""
    properties = {
        "name": str(getnode()),
        "description": "Localhost",
        "ip_address": "0.0.0.0",
        "status": "Up",
    }
    factory("Instance", **properties)
def create_napalm_workflow():
    """Build 'Napalm_VRF_workflow' (create VRF, check, rollback, check gone).

    Fixes:
    - The workflow edges passed ``'subtype': True``; every other
      edge-creating function in this file passes the strings
      ``'success'``/``'failure'``, so the boolean is replaced by
      ``'success'`` for consistency.
    - Dropped the extraneous ``f`` prefix on a placeholder-free job-name
      literal (flake8 F541).
    """
    services, admin = [], fetch('User', name='admin').id
    devices = [
        fetch('Device', name='Washington').id,
        fetch('Device', name='Austin').id,
    ]
    for service in (
        {
            'type': 'NapalmConfigurationService',
            'name': 'napalm_create_vrf_test',
            'description': 'Create a VRF "test" with Napalm',
            'waiting_time': 0,
            'devices': devices,
            'creator': admin,
            'driver': 'eos',
            'vendor': 'Arista',
            'operating_system': 'eos',
            'content_type': 'simple',
            'action': 'load_merge_candidate',
            'content': 'vrf definition test\n',
        },
        {
            'type': 'NapalmRollbackService',
            'name': 'Napalm eos Rollback',
            'driver': 'eos',
            'description': 'Rollback a configuration with Napalm eos',
            'devices': devices,
            'creator': admin,
            'waiting_time': 0,
        },
    ):
        instance = factory(service.pop('type'), **service)
        services.append(instance)
    # Final order: create VRF, check VRF, rollback, check VRF is gone.
    services.insert(1, fetch('Job', name='netmiko_check_vrf_test'))
    services.append(fetch('Job', name='netmiko_check_no_vrf_test'))
    workflow = factory(
        'Workflow',
        **{
            'name': 'Napalm_VRF_workflow',
            'description': 'Create and delete a VRF with Napalm',
            'creator': admin,
            'vendor': 'Arista',
            'operating_system': 'eos',
        },
    )
    workflow.jobs.extend(services)
    edges = [(0, 2), (2, 3), (3, 4), (4, 5), (5, 1)]
    for x, y in edges:
        factory(
            'WorkflowEdge',
            **{
                'name': f'{workflow.name} {x} -> {y}',
                'workflow': workflow.id,
                'subtype': 'success',
                'source': workflow.jobs[x].id,
                'destination': workflow.jobs[y].id,
            },
        )
    # Canvas layout, scaled by 10.
    positions = [(-20, 0), (20, 0), (0, -15), (0, -5), (0, 5), (0, 15)]
    for index, (x, y) in enumerate(positions):
        workflow.jobs[index].positions['Napalm_VRF_workflow'] = x * 10, y * 10
def create_default_users() -> None:
    """Create the 'admin' account unless one already exists, so re-running
    the setup never resets its credentials."""
    if fetch("User", name="admin"):
        return
    factory(
        "User",
        name="admin",
        email="*****@*****.**",
        password="******",
        permissions=["Admin"],
    )
def cluster_monitoring(self, payload: dict) -> dict:
    """Query each registered instance's /rest/is_alive endpoint and update
    its record from the response; always reports success."""
    parameters = get_one("Parameters")
    scheme = parameters.cluster_scan_protocol
    timeout = parameters.cluster_scan_timeout
    for peer in fetch_all("Instance"):
        url = f"{scheme}://{peer.ip_address}/rest/is_alive"
        factory("Instance", **get(url, timeout=timeout).json())
    return {"success": True}
def create_network_topology(app):
    """Load the bundled 'usa.xls' workbook and create its devices and links."""
    with open(app.path / 'projects' / 'usa.xls', 'rb') as workbook_file:
        book = open_workbook(file_contents=workbook_file.read())
        for object_type in ('Device', 'Link'):
            try:
                sheet = book.sheet_by_name(object_type)
            except XLRDError:
                continue  # no sheet for this object type
            header = sheet.row_values(0)
            for row in range(1, sheet.nrows):
                factory(object_type, **dict(zip(header, sheet.row_values(row))))
        db.session.commit()
def create_network_topology(app: Flask) -> None:
    """Import the demo USA topology (devices and links) from an Excel file."""
    xls_path = app.path / "projects" / "usa.xls"
    with open(xls_path, "rb") as f:
        book = open_workbook(file_contents=f.read())
        for kind in ("Device", "Link"):
            try:
                sheet = book.sheet_by_name(kind)
            except XLRDError:
                continue  # sheet absent: nothing of this kind to import
            columns = sheet.row_values(0)
            for index in range(1, sheet.nrows):
                record = dict(zip(columns, sheet.row_values(index)))
                factory(kind, **record)
        db.session.commit()
def query_netbox():
    """Import every device known to Netbox, using its primary IPv4 (or,
    failing that, IPv6) address and the subtype chosen in the form."""
    api = netbox_api(
        request.form['netbox_address'],
        token=request.form['netbox_token'],
    )
    for device in api.dcim.devices.all():
        primary_ip = device.primary_ip4 or device.primary_ip6
        factory(
            'Device',
            name=device.name,
            ip_address=str(primary_ip).split('/')[0],
            subtype=request.form['netbox_type'],
            longitude=0.0,
            latitude=0.0,
        )
    return jsonify(True)
def query_librenms():
    """Import all devices registered in LibreNMS via its REST API, then
    commit the session."""
    url = f'{request.form["librenms_address"]}/api/v0/devices'
    headers = {'X-Auth-Token': request.form['librenms_token']}
    for device in http_get(url, headers=headers).json()['devices']:
        factory(
            'Device',
            name=device['hostname'],
            ip_address=device['ip'] or device['hostname'],
            subtype=request.form['librenms_type'],
            longitude=0.0,
            latitude=0.0,
        )
    db.session.commit()
    return True
def query_netbox() -> bool:
    """Pull the Netbox device inventory and mirror it as Device objects."""
    client = netbox_api(
        request.form["netbox_address"],
        token=request.form["netbox_token"],
    )
    for device in client.dcim.devices.all():
        address = device.primary_ip4 or device.primary_ip6
        properties = {
            "name": device.name,
            "ip_address": str(address).split("/")[0],
            "subtype": request.form["netbox_type"],
            "longitude": 0.0,
            "latitude": 0.0,
        }
        factory("Device", **properties)
    return True
def migrate_import(path_app, request):
    """Re-import objects from the per-class YAML migration files.

    Optionally empties the affected tables first. Returns a status string,
    downgraded to a partial-import message when any object fails.
    """
    status = 'Import successful.'
    if request.get('empty_database_before_import', False):
        delete_all(*request['import_export_types'])
    for cls in request['import_export_types']:
        migration_path = path_app / 'migrations' / request['name'] / f'{cls}.yaml'
        with open(migration_path, 'r') as migration_file:
            # NOTE(review): `load` is presumably yaml.load — acceptable for
            # local migration files, but unsafe on untrusted input; confirm
            # a safe loader is bound at the import site.
            for obj in load(migration_file):
                # Service files carry the concrete subclass in a 'type' key.
                obj_cls = obj.pop('type') if cls == 'Service' else cls
                try:
                    factory(obj_cls, **obj)
                except Exception as e:
                    info(f'{str(obj)} could not be imported ({str(e)})')
                    status = 'Partial import (see logs).'
    return status
def create_default_services():
    """Create the default services: Start/End markers, notification
    services, cluster monitoring, git push, plus the configuration backup
    service bound to the 'Devices only' pool."""
    admin_id = fetch('User', name='admin').id
    swiss_army_definitions = [
        {'name': 'Start', 'description': 'Start point of a workflow',
         'hidden': True},
        {'name': 'End', 'description': 'End point of a workflow',
         'hidden': True},
        {'name': 'mail_feedback_notification',
         'description': 'Mail notification (service logs)'},
        {'name': 'slack_feedback_notification',
         'description': 'Slack notification (service logs)'},
        {'name': 'mattermost_feedback_notification',
         'description': 'Mattermost notification (service logs)'},
        {'name': 'cluster_monitoring',
         'description': 'Monitor eNMS cluster'},
        {'name': 'git_push_configurations',
         'description': 'Push configurations to Gitlab'},
    ]
    for params in swiss_army_definitions:
        factory('SwissArmyKnifeService', creator=admin_id, **params)
    factory(
        'ConfigurationBackupService',
        name='configuration_backup',
        description='Back up device configurations',
        pools=[fetch('Pool', name='Devices only').id],
        multiprocessing=True,
        creator=admin_id,
    )
def create_default_pools():
    """Create the three built-in pools: everything, devices only (links
    excluded by matching the empty link name), and links only."""
    factory('Pool', name='All objects', description='All objects')
    factory(
        'Pool',
        name='Devices only',
        description='Devices only',
        link_name='^$',
        link_name_regex='y',
    )
    factory(
        'Pool',
        name='Links only',
        description='Links only',
        device_name='^$',
        device_name_regex='y',
    )
def scan_cluster():
    """Sweep the configured cluster subnet and register every host whose
    /rest/is_alive endpoint answers as an Instance; unreachable addresses
    are skipped."""
    parameters = get_one('Parameters')
    scheme = parameters.cluster_scan_protocol
    timeout = parameters.cluster_scan_timeout
    for ip_address in IPv4Network(parameters.cluster_scan_subnet):
        try:
            alive = rest_get(
                f'{scheme}://{ip_address}/rest/is_alive',
                timeout=timeout,
            ).json()
            # Record the scanned address alongside the reported status.
            factory('Instance', **{**alive, 'ip_address': str(ip_address)})
        except ConnectionError:
            continue
    db.session.commit()
    return True
def create_example_services():
    """Create the example BGP-configuration service used in the tests."""
    # NOTE(review): unlike other service-creation variants in this file,
    # 'devices' holds Device objects rather than their ids — confirm the
    # factory accepts either form.
    service = {
        'type': 'configure_bgp_service',
        'name': 'napalm_configure_bgp_1',
        'description': 'Configure BGP Peering with Napalm',
        'devices': [fetch('Device', name='Washington')],
        'local_as': 100,
        'loopback': 'Lo100',
        'loopback_ip': '100.1.1.1',
        'neighbor_ip': '100.1.2.1',
        'remote_as': 200,
        'vrf_name': 'configure_BGP_test',
        'waiting_time': 0,
    }
    factory(service.pop('type'), **service)
def update_instance(cls):
    """Create or update an object of type `cls` from the request form.

    Logs the change and returns the serialized object; on a malformed JSON
    field, returns an error payload instead of raising.
    """
    try:
        obj = factory(cls, **request.form)
        info(f'{current_user.name}: UPDATE {cls} {obj.name} ({obj.id})')
        return obj.serialized
    except JSONDecodeError:
        return {'error': 'Invalid JSON syntax (JSON field)'}