def create_default_workflows() -> None:
    name = "Configuration Management Workflow"
    workflow = factory(
        "Workflow",
        **{
            "name": name,
            "description": "Poll configuration and push to gitlab",
            "use_workflow_targets": False,
            "creator": fetch("User", name="admin").id,
        },
    )
    workflow.jobs.extend(
        [
            fetch("Service", name="poller_service"),
            fetch("Service", name="git_push_configurations"),
        ]
    )
    edges = [(0, 2, True), (2, 3, True), (2, 3, False), (3, 1, True)]
    for x, y, edge_type in edges:
        factory(
            "WorkflowEdge",
            **{
                "name": f"{workflow.name} {x} -> {y} ({edge_type})",
                "workflow": workflow.id,
                "subtype": "success" if edge_type else "failure",
                "source": workflow.jobs[x].id,
                "destination": workflow.jobs[y].id,
            },
        )
    positions = [(-30, 0), (20, 0), (0, -20), (0, 30)]
    for index, (x, y) in enumerate(positions):
        workflow.jobs[index].positions[name] = x * 10, y * 10

def object_import(request: dict, file: FileStorage) -> str:
    if request["replace"]:
        delete_all("Device")
    result = "Topology successfully imported."
    if allowed_file(secure_filename(file.filename), {"xls", "xlsx"}):
        book = open_workbook(file_contents=file.read())
        for obj_type in ("Device", "Link"):
            try:
                sheet = book.sheet_by_name(obj_type)
            except XLRDError:
                continue
            properties = sheet.row_values(0)
            for row_index in range(1, sheet.nrows):
                prop = dict(zip(properties, sheet.row_values(row_index)))
                try:
                    factory(obj_type, **prop).serialized
                except Exception as e:
                    info(f"{str(prop)} could not be imported ({str(e)})")
                    result = "Partial import (see logs)."
            db.session.commit()
    if request["update_pools"]:
        for pool in fetch_all("Pool"):
            pool.compute_pool()
    db.session.commit()
    return result

def create_default_services() -> None:
    admin = fetch("User", name="admin").id
    for service in (
        {
            "type": "SwissArmyKnifeService",
            "name": "Start",
            "description": "Start point of a workflow",
            "creator": admin,
            "hidden": True,
        },
        {
            "type": "SwissArmyKnifeService",
            "name": "End",
            "description": "End point of a workflow",
            "creator": admin,
            "hidden": True,
        },
        {
            "type": "SwissArmyKnifeService",
            "name": "mail_feedback_notification",
            "description": "Mail notification (service logs)",
            "creator": admin,
        },
        {
            "type": "SwissArmyKnifeService",
            "name": "slack_feedback_notification",
            "description": "Slack notification (service logs)",
            "creator": admin,
        },
        {
            "type": "SwissArmyKnifeService",
            "name": "mattermost_feedback_notification",
            "description": "Mattermost notification (service logs)",
            "creator": admin,
        },
        {
            "type": "SwissArmyKnifeService",
            "name": "cluster_monitoring",
            "description": "Monitor eNMS cluster",
            "creator": admin,
        },
        {
            "type": "SwissArmyKnifeService",
            "name": "git_push_configurations",
            "description": "Push configurations to Gitlab",
            "creator": admin,
        },
        {
            "type": "SwissArmyKnifeService",
            "name": "poller_service",
            "description": "Configuration Management Poller",
            "creator": admin,
            "hidden": True,
        },
    ):
        factory(service.pop("type"), **service)

def create_napalm_workflow() -> None:
    admin = fetch("User", name="admin").id
    devices = [
        fetch("Device", name="Washington").id,
        fetch("Device", name="Austin").id,
    ]
    services = [
        factory(
            "NapalmConfigurationService",
            **{
                "name": "napalm_create_vrf_test",
                "description": 'Create a VRF "test" with Napalm',
                "waiting_time": 0,
                "devices": devices,
                "creator": admin,
                "driver": "eos",
                "vendor": "Arista",
                "operating_system": "eos",
                "content_type": "simple",
                "action": "load_merge_candidate",
                "content": "vrf definition test\n",
            },
        )
    ]
    services.extend(
        [
            fetch("Job", name="netmiko_check_vrf_test"),
            fetch("Job", name="netmiko_delete_vrf_test"),
            fetch("Job", name="netmiko_check_no_vrf_test"),
        ]
    )
    workflow = factory(
        "Workflow",
        **{
            "name": "Napalm_VRF_workflow",
            "description": "Create and delete a VRF with Napalm",
            "creator": admin,
            "devices": devices,
            "vendor": "Arista",
            "operating_system": "eos",
        },
    )
    workflow.jobs.extend(services)
    edges = [(0, 2), (2, 3), (3, 4), (4, 5), (5, 1)]
    for x, y in edges:
        factory(
            "WorkflowEdge",
            **{
                "name": f"{workflow.name} {x} -> {y}",
                "workflow": workflow.id,
                "subtype": "success",
                "source": workflow.jobs[x].id,
                "destination": workflow.jobs[y].id,
            },
        )
    positions = [(-20, 0), (20, 0), (0, -15), (0, -5), (0, 5), (0, 15)]
    for index, (x, y) in enumerate(positions):
        workflow.jobs[index].positions["Napalm_VRF_workflow"] = x * 10, y * 10

def configure_instance_id() -> None:
    factory(
        "Instance",
        **{
            "name": str(getnode()),
            "description": "Localhost",
            "ip_address": "0.0.0.0",
            "status": "Up",
        },
    )

def create_default_users() -> None:
    if not fetch("User", name="admin"):
        factory(
            "User",
            **{
                "name": "admin",
                "email": "*****@*****.**",
                "password": "******",
                "permissions": ["Admin"],
            },
        )

def cluster_monitoring(self, payload: dict) -> dict:
    parameters = get_one("Parameters")
    protocol = parameters.cluster_scan_protocol
    for instance in fetch_all("Instance"):
        factory(
            "Instance",
            **get(
                f"{protocol}://{instance.ip_address}/rest/is_alive",
                timeout=parameters.cluster_scan_timeout,
            ).json(),
        )
    return {"success": True}

def create_network_topology(app: Flask) -> None:
    with open(app.path / "projects" / "usa.xls", "rb") as f:
        book = open_workbook(file_contents=f.read())
        for object_type in ("Device", "Link"):
            try:
                sheet = book.sheet_by_name(object_type)
            except XLRDError:
                continue
            properties = sheet.row_values(0)
            for row_index in range(1, sheet.nrows):
                values = dict(zip(properties, sheet.row_values(row_index)))
                factory(object_type, **values)
            db.session.commit()

def query_netbox() -> bool:
    nb = netbox_api(
        request.form["netbox_address"], token=request.form["netbox_token"]
    )
    for device in nb.dcim.devices.all():
        device_ip = device.primary_ip4 or device.primary_ip6
        factory(
            "Device",
            **{
                "name": device.name,
                "ip_address": str(device_ip).split("/")[0],
                "subtype": request.form["netbox_type"],
                "longitude": 0.0,
                "latitude": 0.0,
            },
        )
    return True

def update_instance(cls: str) -> dict:
    try:
        instance = factory(cls, **request.form)
        info(f"{current_user.name}: UPDATE {cls} {instance.name} ({instance.id})")
        return instance.serialized
    except JSONDecodeError:
        return {"error": "Invalid JSON syntax (JSON field)"}
    except IntegrityError:
        return {"error": "An object with the same name already exists"}

def query_librenms() -> bool:
    devices = http_get(
        f'{request.form["librenms_address"]}/api/v0/devices',
        headers={"X-Auth-Token": request.form["librenms_token"]},
    ).json()["devices"]
    for device in devices:
        factory(
            "Device",
            **{
                "name": device["hostname"],
                "ip_address": device["ip"] or device["hostname"],
                "subtype": request.form["librenms_type"],
                "longitude": 0.0,
                "latitude": 0.0,
            },
        )
    db.session.commit()
    return True

def create_default_pools() -> None:
    for pool in (
        {"name": "All objects", "description": "All objects"},
        {
            "name": "Devices only",
            "description": "Devices only",
            "link_name": "^$",
            "link_name_regex": "y",
        },
        {
            "name": "Links only",
            "description": "Links only",
            "device_name": "^$",
            "device_name_regex": "y",
        },
    ):
        factory("Pool", **pool)

def query_opennms() -> bool:
    parameters = get_one("Parameters")
    login, password = parameters.opennms_login, request.form["password"]
    parameters.update(**request.form)
    db.session.commit()
    json_devices = http_get(
        parameters.opennms_devices,
        headers={"Accept": "application/json"},
        auth=(login, password),
    ).json()["node"]
    devices = {
        device["id"]: {
            "name": device.get("label", device["id"]),
            "description": device["assetRecord"].get("description", ""),
            "location": device["assetRecord"].get("building", ""),
            "vendor": device["assetRecord"].get("manufacturer", ""),
            "model": device["assetRecord"].get("modelNumber", ""),
            "operating_system": device.get("operatingSystem", ""),
            "os_version": device["assetRecord"].get("sysDescription", ""),
            "longitude": device["assetRecord"].get("longitude", 0.0),
            "latitude": device["assetRecord"].get("latitude", 0.0),
            "subtype": request.form["subtype"],
        }
        for device in json_devices
    }
    for device in list(devices):
        link = http_get(
            f"{parameters.opennms_rest_api}/nodes/{device}/ipinterfaces",
            headers={"Accept": "application/json"},
            auth=(login, password),
        ).json()
        for interface in link["ipInterface"]:
            if interface["snmpPrimary"] == "P":
                devices[device]["ip_address"] = interface["ipAddress"]
                factory("Device", **devices[device])
    db.session.commit()
    return True

def scan_cluster() -> bool:
    parameters = get_one("Parameters")
    protocol = parameters.cluster_scan_protocol
    for ip_address in IPv4Network(parameters.cluster_scan_subnet):
        try:
            instance = rest_get(
                f"{protocol}://{ip_address}/rest/is_alive",
                timeout=parameters.cluster_scan_timeout,
            ).json()
            if app.config["CLUSTER_ID"] != instance.pop("cluster_id"):
                continue
            factory("Instance", **{**instance, **{"ip_address": str(ip_address)}})
        except ConnectionError:
            continue
    db.session.commit()
    return True

def duplicate_workflow(workflow_id: int) -> dict:
    parent_workflow = fetch("Workflow", id=workflow_id)
    new_workflow = factory("Workflow", **request.form)
    for job in parent_workflow.jobs:
        new_workflow.jobs.append(job)
        job.positions[new_workflow.name] = job.positions[parent_workflow.name]
    for edge in parent_workflow.edges:
        subtype, src, destination = edge.subtype, edge.source, edge.destination
        new_workflow.edges.append(
            factory(
                "WorkflowEdge",
                **{
                    "name": f"{new_workflow.id}-{subtype}:{src.id}->{destination.id}",
                    "workflow": new_workflow.id,
                    "subtype": subtype,
                    "source": src.id,
                    "destination": destination.id,
                },
            )
        )
    db.session.commit()
    return new_workflow.serialized

def create_default_tasks(app: Flask) -> None:
    tasks = [
        {
            "aps_job_id": "Poller",
            "name": "Poller",
            "description": "Back-up device configurations",
            "job": fetch("Workflow", name="Configuration Management Workflow").id,
            "frequency": 3600,
        },
        {
            "aps_job_id": "Cluster Monitoring",
            "name": "Cluster Monitoring",
            "description": "Monitor eNMS cluster",
            "job": fetch("Service", name="cluster_monitoring").id,
            "frequency": 15,
            "is_active": app.config["CLUSTER"],
        },
    ]
    for task in tasks:
        if not fetch("Task", name=task["name"]):
            factory("Task", **task)

def add_edge(workflow_id: int, subtype: str, source: int, dest: int) -> dict:
    workflow_edge = factory(
        "WorkflowEdge",
        **{
            "name": f"{workflow_id}-{subtype}:{source}->{dest}",
            "workflow": workflow_id,
            "subtype": subtype,
            "source": source,
            "destination": dest,
        },
    )
    fetch("Workflow", id=workflow_id).last_modified = str(datetime.now())
    db.session.commit()
    return workflow_edge.serialized

def create_workflow_of_workflows() -> None:
    admin = fetch("User", name="admin").id
    devices = [fetch("Device", name="Washington").id]
    workflow = factory(
        "Workflow",
        **{
            "name": "Workflow_of_workflows",
            "description": "Test the inner workflow system",
            "devices": devices,
            "creator": admin,
            "vendor": "Arista",
            "operating_system": "eos",
        },
    )
    workflow.jobs.extend(
        [
            fetch("Job", name="payload_transfer_workflow"),
            fetch("Job", name="get_interfaces"),
            fetch("Job", name="Napalm_VRF_workflow"),
        ]
    )
    edges = [(0, 2), (2, 3), (3, 4), (4, 1)]
    for x, y in edges:
        factory(
            "WorkflowEdge",
            **{
                "name": f"{workflow.name} {x} -> {y}",
                "workflow": workflow.id,
                "subtype": "success",
                "devices": devices,
                "source": workflow.jobs[x].id,
                "destination": workflow.jobs[y].id,
            },
        )
    positions = [(-30, 0), (30, 0), (0, -20), (0, 0), (0, 20)]
    for index, (x, y) in enumerate(positions):
        workflow.jobs[index].positions["Workflow_of_workflows"] = x * 10, y * 10

def migrate_import(app: Flask, request: dict) -> str:
    status, types = "Import successful.", request["import_export_types"]
    workflows: list = []
    edges: list = []
    if request.get("empty_database_before_import", False):
        delete_all(*types)
    for cls in types:
        path = app.path / "migrations" / request["name"] / f"{cls}.yaml"
        with open(path, "r") as migration_file:
            objects = load(migration_file)
            if cls == "Workflow":
                workflows = deepcopy(objects)
            if cls == "WorkflowEdge":
                edges = deepcopy(objects)
                continue
            for obj in objects:
                obj_cls = obj.pop("type") if cls == "Service" else cls
                # 1) We cannot import workflow edges before workflows, because a
                # workflow edge is defined by the workflow it belongs to.
                # Therefore, we import workflows before workflow edges, but
                # strip off the edges, because they do not exist at this stage.
                # Edges are created later on, when the workflow edges themselves
                # are imported.
                # 2) At this stage, we cannot import jobs either: if workflows
                # A (ID 1) and B (ID 2) are created, and B is added to A as a
                # subworkflow, we would not be able to create A because B, one
                # of its jobs, does not exist yet. To work around this, we
                # strip off the jobs at this stage and reimport workflows a
                # second time at the end.
                if cls == "Workflow":
                    obj["edges"], obj["jobs"] = [], []
                try:
                    factory(obj_cls, **obj)
                except Exception as e:
                    info(f"{str(obj)} could not be imported ({str(e)})")
                    status = "Partial import (see logs)."
    for workflow in workflows:
        workflow["edges"] = []
        try:
            factory("Workflow", **workflow)
        except Exception as e:
            info(f"{str(workflow)} could not be imported ({str(e)})")
            status = "Partial import (see logs)."
    for edge in edges:
        try:
            factory("WorkflowEdge", **edge)
        except Exception as e:
            info(f"{str(edge)} could not be imported ({str(e)})")
            status = "Partial import (see logs)."
    if request.get("empty_database_before_import", False):
        create_default(app)
    return status

def login() -> Union[Response, str]:
    if request.method == "POST":
        name, password = request.form["name"], request.form["password"]
        try:
            if request.form["authentication_method"] == "Local User":
                user = fetch("User", name=name)
                if user and password == user.password:
                    login_user(user)
                    return redirect(url_for("base_blueprint.dashboard"))
            elif request.form["authentication_method"] == "LDAP Domain":
                with Connection(
                    ldap_client,
                    user=f'{app.config["LDAP_USERDN"]}\\{name}',
                    password=password,
                    auto_bind=True,
                    authentication=NTLM,
                ) as connection:
                    connection.search(
                        app.config["LDAP_BASEDN"],
                        f"(&(objectClass=person)(samaccountname={name}))",
                        search_scope=SUBTREE,
                        get_operational_attributes=True,
                        attributes=["cn", "memberOf", "mail"],
                    )
                    json_response = loads(connection.response_to_json())["entries"][0]
                    if json_response:
                        user = {
                            "name": name,
                            "password": password,
                            "email": json_response["attributes"].get("mail", ""),
                        }
                        if any(
                            group in s
                            for group in app.config["LDAP_ADMIN_GROUP"]
                            for s in json_response["attributes"]["memberOf"]
                        ):
                            user["permissions"] = ["Admin"]
                        new_user = factory("User", **user)
                        login_user(new_user)
                        return redirect(url_for("base_blueprint.dashboard"))
            elif request.form["authentication_method"] == "TACACS":
                if tacacs_client.authenticate(name, password).valid:
                    user = factory("User", **{"name": name, "password": password})
                    login_user(user)
                    return redirect(url_for("base_blueprint.dashboard"))
            abort(403)
        except Exception as e:
            info(f"Authentication failed ({str(e)})")
            abort(403)
    if not current_user.is_authenticated:
        login_form = LoginForm(request.form)
        authentication_methods = [("Local User",) * 2]
        if USE_LDAP:
            authentication_methods.append(("LDAP Domain",) * 2)
        if USE_TACACS:
            authentication_methods.append(("TACACS",) * 2)
        login_form.authentication_method.choices = authentication_methods
        return render_template("login.html", login_form=login_form)
    return redirect(url_for("base_blueprint.dashboard"))

def post(self, cls: str) -> dict:
    return factory(cls, **request.get_json()).serialized

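# For illustration only: a sketch of how a client might call this ReST endpoint,
# assuming it is registered under /rest/instance/<cls> (the same route family as
# the URL used by the RestCallService example in this section) and that valid
# basic-auth credentials are supplied. The route, payload, and credentials below
# are placeholders, not documented eNMS behaviour.
from requests import post as http_post

response = http_post(
    "http://127.0.0.1:5000/rest/instance/device",
    json={"name": "Washington", "ip_address": "192.168.1.1", "subtype": "router"},
    auth=("admin", "admin"),  # placeholder credentials
)
print(response.json())  # serialized representation of the created or updated device
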
def create_payload_transfer_workflow() -> None:
    services, admin = [], fetch("User", name="admin").id
    devices = [
        fetch("Device", name="Washington").id,
        fetch("Device", name="Austin").id,
    ]
    for service in (
        [
            {
                "name": "GET_device",
                "type": "RestCallService",
                "description": "Use GET ReST call on eNMS ReST API",
                "username": "******",
                "password": "******",
                "waiting_time": 0,
                "devices": devices,
                "creator": admin,
                "content_match": "",
                "call_type": "GET",
                "url": "http://127.0.0.1:5000/rest/instance/device/{{device.name}}",
                "payload": "",
                "multiprocessing": "y",
            }
        ]
        + [
            {
                "name": getter,
                "type": "NapalmGettersService",
                "description": f"Getter: {getter}",
                "waiting_time": 0,
                "devices": devices,
                "creator": admin,
                "driver": "eos",
                "content_match": "",
                "getters": [getter],
            }
            for getter in (
                "get_facts",
                "get_interfaces",
                "get_interfaces_ip",
                "get_config",
            )
        ]
        + [
            {
                "name": "process_payload1",
                "type": "SwissArmyKnifeService",
                "description": "Process Payload in example workflow",
                "waiting_time": 0,
                "devices": devices,
                "creator": admin,
            }
        ]
    ):
        instance = factory(service.pop("type"), **service)
        services.append(instance)
    workflow = factory(
        "Workflow",
        **{
            "name": "payload_transfer_workflow",
            "description": "ReST call, Napalm getters, etc",
            "use_workflow_targets": False,
            "creator": admin,
            "devices": devices,
            "vendor": "Arista",
            "operating_system": "eos",
        },
    )
    workflow.jobs.extend(services)
    # Position the jobs on the workflow canvas, then create the edges.
    positions = [
        (-20, 0),
        (50, 0),
        (-5, 0),
        (-5, -10),
        (15, 10),
        (15, -10),
        (30, -10),
        (30, 0),
    ]
    for index, (x, y) in enumerate(positions):
        job = workflow.jobs[index]
        job.positions["payload_transfer_workflow"] = x * 10, y * 10
    edges = [(0, 2), (2, 3), (2, 4), (3, 5), (5, 6), (6, 7), (4, 7), (7, 1)]
    for x, y in edges:
        factory(
            "WorkflowEdge",
            **{
                "name": f"{workflow.name}:success {x} -> {y}",
                "workflow": workflow.id,
                "subtype": "success",
                "source": workflow.jobs[x].id,
                "destination": workflow.jobs[y].id,
            },
        )
    prerequisite_edges = [(4, 7), (3, 7)]
    for x, y in prerequisite_edges:
        factory(
            "WorkflowEdge",
            **{
                "name": f"{workflow.name}:prerequisite {x} -> {y}",
                "workflow": workflow.id,
                "subtype": "prerequisite",
                "source": workflow.jobs[x].id,
                "destination": workflow.jobs[y].id,
            },
        )

def create_example_services() -> None:
    admin = fetch("User", name="admin").id
    for service in (
        {
            "type": "ConfigureBgpService",
            "name": "napalm_configure_bgp_1",
            "description": "Configure BGP Peering with Napalm",
            "devices": [fetch("Device", name="Washington").id],
            "creator": admin,
            "local_as": 100,
            "loopback": "Lo100",
            "loopback_ip": "100.1.1.1",
            "neighbor_ip": "100.1.2.1",
            "remote_as": 200,
            "vrf_name": "configure_BGP_test",
            "waiting_time": 0,
        },
        {
            "type": "GenericFileTransferService",
            "name": "test_file_transfer_service",
            "description": "Test the file transfer service",
            "devices": [fetch("Device", name="Aserver").id],
            "creator": admin,
            "direction": "get",
            "protocol": "scp",
            "source_file": "/media/sf_VM/eNMS/tests/file_transfer/a.bin",
            "destination_file": "/media/sf_VM/eNMS/tests/file_transfer/b.bin",
            "missing_host_key_policy": True,
        },
        {
            "type": "LogBackupService",
            "name": "test_log_backup_service",
            "description": "Test the log backup service",
            "devices": [fetch("Device", name="Aserver").id],
            "creator": admin,
            "protocol": "scp",
            "destination_ip_address": "127.0.0.1",
            "destination_path": "/media/sf_VM/eNMS/tests/file_transfer",
            "delete_archive": True,
            "delete_folder": True,
        },
        {
            "type": "DatabaseBackupService",
            "name": "test_database_backup_service",
            "description": "Test the database backup service",
            "devices": [fetch("Device", name="Aserver").id],
            "creator": admin,
            "protocol": "scp",
            "destination_ip_address": "127.0.0.1",
            "destination_path": "/media/sf_VM/eNMS/tests/file_transfer",
            "delete_archive": True,
            "delete_folder": True,
        },
        {
            "type": "ConfigurationBackupService",
            "name": "configuration_backup",
            "description": "Test Configuration Management",
            "devices": [fetch("Device", name="Washington").id],
            "configuration_command": "show running-config",
            "multiprocessing": True,
            "creator": admin,
        },
    ):
        factory(service.pop("type"), **service)

def create_netmiko_workflow() -> None:
    services, admin = [], fetch("User", name="admin").id
    devices = [
        fetch("Device", name="Washington").id,
        fetch("Device", name="Austin").id,
    ]
    for service in (
        {
            "type": "NetmikoConfigurationService",
            "name": "netmiko_create_vrf_test",
            "description": 'Create a VRF "test" with Netmiko',
            "waiting_time": 0,
            "devices": devices,
            "creator": admin,
            "vendor": "Arista",
            "operating_system": "eos",
            "driver": "arista_eos",
            "global_delay_factor": "1.0",
            "content": "vrf definition test",
            "enable_mode": "y",
            "fast_cli": "y",
            "timeout": 3,
        },
        {
            "type": "NetmikoValidationService",
            "name": "netmiko_check_vrf_test",
            "description": 'Check that the vrf "test" is configured',
            "waiting_time": 0,
            "devices": devices,
            "creator": admin,
            "vendor": "Arista",
            "operating_system": "eos",
            "driver": "arista_eos",
            "command": "show vrf",
            "content_match": "test",
            "fast_cli": "y",
            "timeout": 3,
        },
        {
            "type": "NetmikoConfigurationService",
            "name": "netmiko_delete_vrf_test",
            "description": 'Delete VRF "test"',
            "waiting_time": 1,
            "devices": devices,
            "creator": admin,
            "vendor": "Arista",
            "operating_system": "eos",
            "driver": "arista_eos",
            "global_delay_factor": "1.0",
            "content": "no vrf definition test",
            "enable_mode": "y",
            "fast_cli": "y",
            "timeout": 3,
        },
        {
            "type": "NetmikoValidationService",
            "name": "netmiko_check_no_vrf_test",
            "description": 'Check that the vrf "test" is NOT configured',
            "waiting_time": 0,
            "devices": devices,
            "creator": admin,
            "vendor": "Arista",
            "operating_system": "eos",
            "driver": "arista_eos",
            "command": "show vrf",
            "content_match": r"^((?!test)[\s\S])*$",
            "content_match_regex": "y",
            "fast_cli": "y",
            "timeout": 3,
            "number_of_retries": 2,
            "time_between_retries": 1,
        },
    ):
        instance = factory(service.pop("type"), **service)
        services.append(instance)
    workflow = factory(
        "Workflow",
        **{
            "name": "Netmiko_VRF_workflow",
            "description": "Create and delete a VRF with Netmiko",
            "creator": admin,
            "devices": devices,
            "vendor": "Arista",
            "operating_system": "eos",
        },
    )
    workflow.jobs.extend(services)
    edges = [(0, 2), (2, 3), (3, 4), (4, 5), (5, 1)]
    for x, y in edges:
        factory(
            "WorkflowEdge",
            **{
                "name": f"{workflow.name} {x} -> {y}",
                "workflow": workflow.id,
                "subtype": "success",
                "source": workflow.jobs[x].id,
                "destination": workflow.jobs[y].id,
            },
        )
    positions = [(-20, 0), (20, 0), (0, -15), (0, -5), (0, 5), (0, 15)]
    for index, (x, y) in enumerate(positions):
        workflow.jobs[index].positions["Netmiko_VRF_workflow"] = x * 10, y * 10

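# All of the examples above revolve around factory(cls, **kwargs). A minimal
# sketch of the fetch-or-create pattern it presumably follows is shown below;
# the "models" registry name and the exact update semantics are assumptions
# for illustration, not the real eNMS helper.
def factory_sketch(cls: str, **kwargs):
    # Look up an existing instance by its unique name; update it if found,
    # otherwise instantiate a new object and add it to the session.
    instance = fetch(cls, name=kwargs.get("name"))
    if instance:
        instance.update(**kwargs)
    else:
        instance = models[cls](**kwargs)  # hypothetical class registry
        db.session.add(instance)
    db.session.commit()
    return instance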