def fetch(model, allow_none=False, all_matches=False, **kwargs):
    query = Session.query(models[model]).filter_by(**kwargs)
    result = query.all() if all_matches else query.first()
    if result or allow_none:
        return result
    else:
        raise Exception(
            f"There is no {model} in the database "
            f"with the following characteristics: {kwargs}"
        )
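# Illustrative usage of fetch(), not part of the original module. It assumes the
# Session and the `models` registry are already configured and that a "device" model
# exists; the model name, device name and "status" property are placeholders.
def _fetch_usage_example():
    # returns None instead of raising when no device named "router1" exists
    router = fetch("device", allow_none=True, name="router1")
    # returns a (possibly empty) list of every matching device
    idle = fetch("device", all_matches=True, allow_none=True, status="Idle")
    return router, idle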
def table_filtering(self, table, **kwargs):
    model, properties = models[table], table_properties[table]
    operator = and_ if kwargs["form"].get("operator", "all") == "all" else or_
    column_index = int(kwargs["order"][0]["column"])
    if column_index < len(properties):
        order_property = getattr(model, properties[column_index])
        order_function = getattr(order_property, kwargs["order"][0]["dir"], None)
    else:
        order_function = None
    constraints = self.build_filtering_constraints(table, **kwargs)
    if table == "result":
        constraints.append(models["result"].service.has(id=kwargs["instance"]["id"]))
        if kwargs.get("runtime"):
            constraints.append(models["result"].parent_runtime == kwargs["runtime"])
    if table == "service":
        workflow_id = kwargs["form"].get("workflow-filtering")
        if workflow_id:
            constraints.append(
                models["service"].workflows.any(
                    models["workflow"].id == int(workflow_id)
                )
            )
        else:
            if kwargs["form"].get("parent-filtering") == "true":
                constraints.append(~models["service"].workflows.any())
    if table == "run":
        constraints.append(models["run"].children.any())
    result = Session.query(model).filter(operator(*constraints))
    if order_function:
        result = result.order_by(order_function())
    return {
        "draw": int(kwargs["draw"]),
        "recordsTotal": Session.query(func.count(model.id)).scalar(),
        "recordsFiltered": get_query_count(result),
        "data": [
            obj.generate_row(**kwargs)
            for obj in result.limit(int(kwargs["length"]))
            .offset(int(kwargs["start"]))
            .all()
        ],
    }
def visible(cls) -> List:
    if cls.__tablename__ == "Pool" and user.pools:
        return user.pools
    elif cls.__tablename__ in ("Device", "Link") and user.pools:
        objects: set = set()
        for pool in user.pools:
            objects |= set(getattr(pool, f"{cls.class_type}s"))
        return list(objects)
    else:
        return Session.query(cls).all()
def authenticate_user(self, **kwargs):
    name, password = kwargs["name"], kwargs["password"]
    # default return value when no authentication branch produces a user
    user = None
    if kwargs["authentication_method"] == "Local User":
        user = fetch("user", allow_none=True, name=name)
        return user if user and password == user.password else False
    elif kwargs["authentication_method"] == "LDAP Domain":
        with Connection(
            self.ldap_client,
            user=f"{self.config['ldap']['userdn']}\\{name}",
            password=password,
            auto_bind=True,
            authentication=NTLM,
        ) as connection:
            connection.search(
                self.config["ldap"]["basedn"],
                f"(&(objectClass=person)(samaccountname={name}))",
                search_scope=SUBTREE,
                get_operational_attributes=True,
                attributes=["cn", "memberOf", "mail"],
            )
            json_response = loads(connection.response_to_json())["entries"][0]
            if json_response and any(
                group in s
                for group in self.config["ldap"]["admin_group"].split(",")
                for s in json_response["attributes"]["memberOf"]
            ):
                user = factory(
                    "user",
                    **{
                        "name": name,
                        "password": password,
                        "email": json_response["attributes"].get("mail", ""),
                        "permissions": ["Admin"],
                    },
                )
    elif kwargs["authentication_method"] == "TACACS":
        if self.tacacs_client.authenticate(name, password).valid:
            user = factory("user", **{"name": name, "password": password})
    Session.commit()
    return user
def table_filtering(self, table, kwargs):
    model, properties = models[table], table_properties[table]
    operator = and_ if kwargs.get("form[operator]", "all") == "all" else or_
    column_index = int(kwargs["order[0][column]"])
    if column_index < len(properties):
        order_property = getattr(model, properties[column_index])
        order_function = getattr(order_property, kwargs["order[0][dir]"], None)
    else:
        order_function = None
    constraints = self.build_filtering_constraints(table, kwargs)
    if table == "result":
        constraints.append(
            getattr(
                models["result"],
                "service"
                if "service" in kwargs["instance[type]"]
                else kwargs["instance[type]"],
            ).has(id=kwargs["instance[id]"])
        )
        if kwargs.get("service[runtime]"):
            constraints.append(
                models["result"].parent_runtime == kwargs.get("service[runtime]")
            )
    elif table == "configuration" and kwargs.get("instance[id]"):
        constraints.append(
            getattr(models[table], "device").has(id=kwargs["instance[id]"])
        )
    result = Session.query(model).filter(operator(*constraints))
    if order_function:
        result = result.order_by(order_function())
    return {
        "draw": int(kwargs["draw"]),
        "recordsTotal": Session.query(func.count(model.id)).scalar(),
        "recordsFiltered": get_query_count(result),
        "data": [
            [
                getattr(obj, f"table_{property}", getattr(obj, property))
                for property in properties
            ]
            + obj.generate_row(table)
            for obj in result.limit(int(kwargs["length"]))
            .offset(int(kwargs["start"]))
            .all()
        ],
    }
def update_database_configurations_from_git(self) -> None:
    for dir in scandir(self.path / "git" / "configurations"):
        if dir.name == ".git":
            continue
        device = fetch("Device", allow_none=True, name=dir.name)
        if device:
            with open(Path(dir.path) / "data.yml") as data:
                parameters = yaml.load(data)
                device.update(**{"dont_update_pools": True, **parameters})
            config_file = Path(dir.path) / dir.name
            if not config_file.exists():
                continue
            with open(config_file) as f:
                device.current_configuration = device.configurations[
                    str(parameters["last_update"])
                ] = f.read()
    Session.commit()
    for pool in fetch_all("Pool"):
        if pool.device_current_configuration:
            pool.compute_pool()
def update_database_configurations_from_git(self):
    for dir in scandir(self.path / "network_data"):
        if dir.name == ".git":
            continue
        device = fetch("device", allow_none=True, name=dir.name)
        if device:
            with open(Path(dir.path) / "data.yml") as data:
                parameters = yaml.load(data)
                device.update(**{"dont_update_pools": True, **parameters})
            for data in ("configuration", "operational_data"):
                filepath = Path(dir.path) / dir.name / data
                if not filepath.exists():
                    continue
                with open(filepath) as file:
                    setattr(device, data, file.read())
    Session.commit()
    for pool in fetch_all("pool"):
        if pool.device_configuration or pool.device_operational_data:
            pool.compute_pool()
def update(self, instance_type, **kwargs):
    try:
        must_be_new = kwargs.get("id") == ""
        for arg in ("name", "scoped_name"):
            if arg in kwargs:
                kwargs[arg] = kwargs[arg].strip()
        kwargs["last_modified"] = self.get_time()
        kwargs["creator"] = kwargs["user"] = getattr(current_user, "name", "admin")
        instance = factory(instance_type, must_be_new=must_be_new, **kwargs)
        if kwargs.get("original"):
            fetch(instance_type, id=kwargs["original"]).duplicate(clone=instance)
        Session.flush()
        return instance.serialized
    except Exception as exc:
        Session.rollback()
        if isinstance(exc, IntegrityError):
            return {
                "alert": f"There already is a {instance_type} with the same name"
            }
        return {"alert": str(exc)}
def configure_database(self):
    self.init_services()
    Base.metadata.create_all(bind=engine)
    configure_mappers()
    configure_events(self)
    self.init_forms()
    self.clean_database()
    if not fetch("user", allow_none=True, name="admin"):
        self.configure_server_id()
        self.create_admin_user()
        Session.commit()
        if self.settings["app"]["create_examples"]:
            self.migration_import(
                name="examples", import_export_types=import_classes
            )
            self.update_credentials()
        else:
            self.migration_import(
                name="default", import_export_types=import_classes
            )
        self.get_git_content()
        Session.commit()
def get_results(self, payload, device=None):
    self.log("info", "STARTING", device)
    start = datetime.now().replace(microsecond=0)
    results = {"runtime": app.get_time(), "logs": []}
    try:
        if self.restart_run and self.service.type == "workflow":
            old_result = self.restart_run.result(device=device.name)
            if old_result and "payload" in old_result.result:
                payload.update(old_result["payload"])
        if self.service.iteration_values:
            targets_results = {}
            for target in self.eval(self.service.iteration_values, **locals()):
                self.payload_helper(payload, self.iteration_variable_name, target)
                targets_results[str(target)] = self.run_service_job(payload, device)
            results.update(
                {
                    "result": targets_results,
                    "success": all(r["success"] for r in targets_results.values()),
                }
            )
        else:
            results.update(self.run_service_job(payload, device))
    except Exception:
        results.update(
            {"success": False, "result": chr(10).join(format_exc().splitlines())}
        )
        self.log("error", chr(10).join(format_exc().splitlines()), device)
    results["duration"] = str(datetime.now().replace(microsecond=0) - start)
    if device:
        status = "success" if results["success"] else "failure"
        self.run_state["progress"]["device"][status] += 1
        self.run_state["summary"][status].append(device.name)
        self.create_result(results, device)
    Session.commit()
    self.log("info", "FINISHED", device)
    return results
def migration_import(self, folder="migrations", **kwargs):
    status, models = "Import successful.", kwargs["import_export_types"]
    if kwargs.get("empty_database_before_import", False):
        for model in models:
            delete_all(model)
            Session.commit()
    workflow_edges, workflow_services = [], {}
    folder_path = self.path / "files" / folder / kwargs["name"]
    for model in models:
        path = folder_path / f"{model}.yaml"
        if not path.exists():
            continue
        with open(path, "r") as migration_file:
            instances = yaml.load(migration_file)
            if model == "workflow_edge":
                workflow_edges = deepcopy(instances)
                continue
            for instance in instances:
                instance_type = instance.pop("type") if model == "service" else model
                if instance_type == "workflow":
                    workflow_services[instance["name"]] = instance.pop("services")
                try:
                    instance = self.objectify(instance_type, instance)
                    factory(instance_type, **instance)
                    Session.commit()
                except Exception:
                    info(
                        f"{str(instance)} could not be imported :"
                        f"{chr(10).join(format_exc().splitlines())}"
                    )
                    status = "Partial import (see logs)."
    try:
        for name, services in workflow_services.items():
            workflow = fetch("workflow", name=name)
            workflow.services = [
                fetch("service", name=service_name) for service_name in services
            ]
            Session.commit()
        for edge in workflow_edges:
            for property in ("source", "destination", "workflow"):
                edge[property] = fetch("service", name=edge[property]).id
            factory("workflow_edge", **edge)
            Session.commit()
        for service in fetch_all("service"):
            service.set_name()
        self.log("info", status)
    except Exception:
        info(chr(10).join(format_exc().splitlines()))
        status = "Partial import (see logs)."
    return status
def multiselect_filtering(self, type, **params):
    model = models[type]
    results = Session.query(model).filter(model.name.contains(params.get("term")))
    return {
        "items": [
            {"text": result.ui_name, "id": str(result.id)}
            for result in results.limit(10)
            .offset((int(params["page"]) - 1) * 10)
            .all()
        ],
        "total_count": results.count(),
    }
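# Sketch of how multiselect_filtering() is typically driven by a paginated dropdown
# widget (e.g. Select2). Not part of the original module; the "device" model and the
# search term are placeholders.
def _multiselect_usage_example(app):
    page = app.multiselect_filtering("device", term="rout", page="1")
    # page["items"] holds at most 10 {"text": ..., "id": ...} entries for this page,
    # page["total_count"] the number of matches across all pages.
    return page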
def post(self) -> dict:
    data = request.get_json(force=True)
    factory(
        "Pool",
        **{
            "name": data["name"],
            "devices": [
                fetch("Device", name=name).id for name in data.get("devices", "")
            ],
            "links": [
                fetch("Link", name=name).id for name in data.get("links", "")
            ],
            "never_update": True,
        },
    )
    Session.commit()
    return data
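# Illustrative request body for the pool creation endpoint above; not part of the
# original module. The pool, device and link names are placeholders, and the
# referenced devices and links must already exist or fetch() raises.
_example_pool_payload = {
    "name": "datacenter-pool",
    "devices": ["router1", "router2"],
    "links": [],
}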
def duplicate(self, workflow=None, clone=None):
    if not clone:
        clone = super().duplicate(workflow)
    clone_services = {}
    Session.commit()
    for service in self.services:
        if service.shared:
            service_clone = service
            if service not in clone.services:
                clone.services.append(service)
        else:
            service_clone = service.duplicate(clone)
        service_clone.positions[clone.name] = service.positions.get(
            self.name, (0, 0)
        )
        clone_services[service.id] = service_clone
    Session.commit()
    for edge in self.edges:
        clone.edges.append(
            factory(
                "workflow_edge",
                **{
                    "workflow": clone.id,
                    "subtype": edge.subtype,
                    "source": clone_services[edge.source.id].id,
                    "destination": clone_services[edge.destination.id].id,
                },
            )
        )
        Session.commit()
    return clone
def import_jobs(self, **kwargs: Any) -> None:
    jobs = kwargs["jobs_to_import"]
    path = self.path / "projects" / "exported_jobs"
    for file in scandir(path / "services"):
        if file.name == ".gitkeep" or file.name not in jobs:
            continue
        with open(file.path, "r") as instance_file:
            instance = yaml.load(instance_file)
            model = instance.pop("type")
            factory(model, **self.objectify(model, instance))
    Session.commit()
    for workflow in listdir(path / "workflows"):
        if workflow == ".gitkeep" or workflow not in jobs:
            continue
        workflow_name = workflow.split(".")[0]
        with open_tar(path / "workflows" / workflow) as tar_file:
            tar_file.extractall(path=path / "workflows")
            for instance_type in ("jobs", "workflow", "edges"):
                path_job = path / "workflows" / workflow_name / instance_type
                for file in scandir(path_job):
                    with open(path_job / file.name, "r") as instance_file:
                        instance = yaml.load(instance_file)
                        if instance_type == "workflow":
                            delete("Workflow", allow_none=True, name=instance["name"])
                            Session.commit()
                        model = instance.pop("type")
                        factory(model, **self.objectify(model, instance))
                        Session.commit()
        rmtree(path / "workflows" / workflow_name)
def import_jobs(self, **kwargs: Any) -> None:
    jobs = kwargs["jobs_to_import"]
    path = self.path / "projects" / "exported_jobs"
    for file in scandir(path / "services"):
        if file.name == ".gitkeep" or file.name not in jobs:
            continue
        with open(file.path, "r") as instance_file:
            instance = load(instance_file, Loader=BaseLoader)
            model = instance.pop("type")
            factory(model, **self.objectify(model, instance))
    Session.commit()
    for workflow in listdir(path / "workflows"):
        if workflow == ".gitkeep" or workflow not in jobs:
            continue
        for instance_type in ("jobs", "workflow", "edges"):
            path_job = path / "workflows" / workflow / instance_type
            for file in scandir(path_job):
                with open(path_job / file.name, "r") as instance_file:
                    instance = load(instance_file, Loader=BaseLoader)
                    model = instance.pop("type")
                    factory(model, **self.objectify(model, instance))
                    Session.commit()
def run(self, payload: Optional[dict] = None) -> dict:
    try:
        self.log("info", f"{self.job.type} {self.job.name}: Starting")
        self.set_state(status="Running", type=self.job.type)
        controller.job_db[self.job.id]["runs"] += 1
        Session.commit()
        results = self.job.build_results(self, payload or self.initial_payload)
        self.close_connection_cache()
        self.log("info", f"{self.job.type} {self.job.name}: Finished")
    except Exception:
        result = (
            f"Running {self.job.type} '{self.job.name}'"
            " raised the following exception:\n"
            f"{chr(10).join(format_exc().splitlines())}\n\n"
            "Run aborted..."
        )
        self.log("error", result)
        results = {"success": False, "results": result}
    finally:
        status = f"Completed ({'success' if results['success'] else 'failure'})"
        self.status = status  # type: ignore
        self.set_state(status=status)
        controller.job_db[self.job.id]["runs"] -= 1
        results["endtime"] = self.endtime = controller.get_time()  # type: ignore
        results["state"] = controller.run_db.pop(self.runtime)
        results["logs"] = controller.run_logs.pop(self.runtime)  # type: ignore
        if self.task and not self.task.frequency:
            self.task.is_active = False
        results["properties"] = {
            "run": self.properties,
            "service": self.job.to_dict(True),
        }
        self.create_result(results)
        Session.commit()
    if not self.workflow and self.send_notification:
        self.notify(results)
    return results
def route(page):
    f, *args = page.split("/")
    if f not in app.valid_post_endpoints:
        return jsonify({"error": "Invalid POST request."})
    form_type = request.form.get("form_type")
    if f in ("table_filtering", "view_filtering", "multiselect_filtering"):
        result = getattr(app, f)(*args, request.form)
    elif form_type:
        form = form_classes[form_type](request.form)
        if not form.validate_on_submit():
            return jsonify({"invalid_form": True, **{"errors": form.errors}})
        result = getattr(app, f)(*args, **form_postprocessing(request.form))
    else:
        result = getattr(app, f)(*args)
    try:
        Session.commit()
        return jsonify(result)
    except Exception as exc:
        Session.rollback()
        if app.config_mode == "Debug":
            raise
        return jsonify({"error": handle_exception(str(exc))})
def route(page):
    endpoint, *args = page.split("/")
    form_type = request.form.get("form_type")
    if endpoint in app.json_endpoints:
        result = getattr(app, endpoint)(*args, **request.json)
    elif form_type:
        form = form_classes[form_type](request.form)
        if not form.validate_on_submit():
            return jsonify({"invalid_form": True, **{"errors": form.errors}})
        result = getattr(app, endpoint)(
            *args, **form_postprocessing(form, request.form)
        )
    else:
        result = getattr(app, endpoint)(*args)
    try:
        Session.commit()
        return jsonify(result)
    except Exception as exc:
        Session.rollback()
        if app.settings["app"]["config_mode"] == "debug":
            raise
        return jsonify({"alert": handle_exception(str(exc))})
def route(page: str) -> Response:
    f, *args = page.split("/")
    if f not in controller.valid_post_endpoints:
        return jsonify({"error": "Invalid POST request."})
    form_type = request.form.get("form_type")
    if form_type:
        form = form_classes[form_type](request.form)
        if not form.validate_on_submit():
            return jsonify({"invalid_form": True, **{"errors": form.errors}})
        result = getattr(controller, f)(*args, **form_postprocessing(request.form))
    elif f == "filtering":
        result = getattr(controller, f)(*args, request.form)
    else:
        result = getattr(controller, f)(*args)
    try:
        Session.commit()
        return jsonify(result)
    except Exception as exc:
        Session.rollback()
        if controller.enms_config_mode == "Debug":
            raise
        return jsonify({"error": handle_exception(str(exc))})
def topology_import(self, file: BinaryIO) -> str:
    book = open_workbook(file_contents=file.read())
    result = "Topology successfully imported."
    for obj_type in ("Device", "Link"):
        try:
            sheet = book.sheet_by_name(obj_type)
        except XLRDError:
            continue
        properties = sheet.row_values(0)
        for row_index in range(1, sheet.nrows):
            values = {"dont_update_pools": True}
            for index, property in enumerate(properties):
                func = field_conversion[property_types[property]]
                values[property] = func(sheet.row_values(row_index)[index])
            try:
                factory(obj_type, **values).serialized
            except Exception as e:
                info(f"{str(values)} could not be imported ({str(e)})")
                result = "Partial import (see logs)."
        Session.commit()
    for pool in fetch_all("Pool"):
        pool.compute_pool()
    return result
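# Hedged example of feeding topology_import() an uploaded spreadsheet; not part of the
# original module. The filename is a placeholder, and the workbook is assumed to
# contain "Device" and/or "Link" sheets whose first row lists the property names.
def _topology_import_example(controller):
    with open("topology.xls", "rb") as spreadsheet:
        return controller.topology_import(spreadsheet)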
def query_opennms(self, **kwargs):
    login = self.config["opennms"]["login"]
    password = environ.get("OPENNMS_PASSWORD")
    Session.commit()
    json_devices = http_get(
        self.config["opennms"]["devices"],
        headers={"Accept": "application/json"},
        auth=(login, password),
    ).json()["node"]
    devices = {
        device["id"]: {
            "name": device.get("label", device["id"]),
            "description": device["assetRecord"].get("description", ""),
            "location": device["assetRecord"].get("building", ""),
            "vendor": device["assetRecord"].get("manufacturer", ""),
            "model": device["assetRecord"].get("modelNumber", ""),
            "operating_system": device.get("operatingSystem", ""),
            "os_version": device["assetRecord"].get("sysDescription", ""),
            "longitude": device["assetRecord"].get("longitude", 0.0),
            "latitude": device["assetRecord"].get("latitude", 0.0),
        }
        for device in json_devices
    }
    for device in list(devices):
        link = http_get(
            f"{self.config['opennms']['rest_api']}/nodes/{device}/ipinterfaces",
            headers={"Accept": "application/json"},
            auth=(login, password),
        ).json()
        for interface in link["ipInterface"]:
            if interface["snmpPrimary"] == "P":
                devices[device]["ip_address"] = interface["ipAddress"]
                factory("device", **devices[device])
def job(self, run: "Run", payload: dict, device: Device) -> dict:
    try:
        now = datetime.now()
        path_configurations = Path.cwd() / "git" / "configurations"
        path_device_config = path_configurations / device.name
        path_device_config.mkdir(parents=True, exist_ok=True)
        netmiko_connection = run.netmiko_connection(device)
        try:
            netmiko_connection.enable()
        except Exception:
            pass
        run.log("info", f"Fetching configuration on {device.name} (Netmiko)")
        command = run.configuration_command
        config = netmiko_connection.send_command(command)
        device.last_status = "Success"
        device.last_runtime = (datetime.now() - now).total_seconds()
        if device.configurations:
            last_config = device.configurations[max(device.configurations)]
            if config == last_config:
                return {"success": True, "result": "no change"}
        device.configurations[str(now)] = device.current_configuration = config
        with open(path_device_config / device.name, "w") as file:
            file.write(config)
        device.last_update = str(now)
        self.generate_yaml_file(path_device_config, device)
    except Exception as e:
        device.last_status = "Failure"
        device.last_failure = str(now)
        self.generate_yaml_file(path_device_config, device)
        return {"success": False, "result": str(e)}
    if len(device.configurations) > self.number_of_configuration:
        device.configurations.pop(min(device.configurations))
    Session.commit()
    return {"success": True, "result": f"Command: {command}"}
def recursive_search(run: "Run"):
    if not run:
        return None
    query = Session.query(models["run"]).filter(
        models["run"].parent_runtime == run.parent_runtime
    )
    if workflow or self.workflow:
        name = workflow or self.workflow.name
        # filter() returns a new query, so keep the narrowed query
        query = query.filter(
            models["run"].workflow.has(models["workflow"].name == name)
        )
    runs = filter_run(query, "scoped_name") or filter_run(query, "name")
    results = list(filter(None, [run.result(device) for run in runs]))
    if not results:
        return recursive_search(run.restart_run)
    else:
        return results.pop().result
def migration_import(self, **kwargs: Any) -> str:
    status, types = "Import successful.", kwargs["import_export_types"]
    if kwargs.get("empty_database_before_import", False):
        for type in types:
            delete_all(type)
            Session.commit()
    workflow_edges: list = []
    workflow_jobs: dict = {}
    for cls in types:
        path = (
            self.path / "projects" / "migrations" / kwargs["name"] / f"{cls}.yaml"
        )
        with open(path, "r") as migration_file:
            objects = load(migration_file, Loader=BaseLoader)
            if cls == "Workflow":
                workflow_jobs = {
                    workflow["name"]: workflow.pop("jobs") for workflow in objects
                }
            if cls == "WorkflowEdge":
                workflow_edges = deepcopy(objects)
            if cls == "Service":
                objects.sort(key=lambda s: s["type"] == "IterationService")
            for obj in objects:
                obj_cls = obj.pop("type") if cls == "Service" else cls
                obj = self.objectify(obj_cls, obj)
                try:
                    factory(obj_cls, **obj)
                    Session.commit()
                except Exception as e:
                    info(f"{str(obj)} could not be imported ({str(e)})")
                    status = "Partial import (see logs)."
    for name, jobs in workflow_jobs.items():
        fetch("Workflow", name=name).jobs = [
            fetch("Job", name=name) for name in jobs
        ]
        Session.commit()
    for edge in workflow_edges:
        for property in ("source", "destination", "workflow"):
            edge[property] = fetch("Job", name=edge[property]).id
        factory("WorkflowEdge", **edge)
        Session.commit()
    return status
def run(self, payload):
    self.init_state()
    self.run_state["status"] = "Running"
    start = datetime.now().replace(microsecond=0)
    try:
        app.service_db[self.service.id]["runs"] += 1
        self.service.status = "Running"
        Session.commit()
        results = self.device_run(payload)
    except Exception:
        result = (
            f"Running {self.service.type} '{self.service.name}'"
            " raised the following exception:\n"
            f"{chr(10).join(format_exc().splitlines())}\n\n"
            "Run aborted..."
        )
        self.log("error", result)
        results = {"success": False, "runtime": self.runtime, "result": result}
    finally:
        Session.commit()
        results["summary"] = self.run_state.get("summary", None)
        self.status = "Aborted" if self.stop else "Completed"
        self.run_state["status"] = self.status
        if self.run_state["success"] is not False:
            self.success = self.run_state["success"] = results["success"]
        if self.send_notification:
            results = self.notify(results)
        app.service_db[self.service.id]["runs"] -= 1
        if not app.service_db[self.id]["runs"]:
            self.service.status = "Idle"
        results["duration"] = self.duration = str(
            datetime.now().replace(microsecond=0) - start
        )
        results["logs"] = app.run_logs.pop(self.runtime, [])
        if self.runtime == self.parent_runtime:
            self.state = results["state"] = app.run_db.pop(self.runtime)
            self.close_remaining_connections()
        if self.task and not self.task.frequency:
            self.task.is_active = False
        results["properties"] = {
            "run": self.properties,
            "service": self.service.get_properties(exclude=["positions"]),
        }
        if (
            self.runtime == self.parent_runtime
            or len(self.devices) > 1
            or self.run_method == "once"
        ):
            self.create_result(results)
        Session.commit()
    return results
def view_filtering(self, filter_type: str, **kwargs: Any) -> List[dict]:
    obj_type = filter_type.split("_")[0]
    model = models[obj_type]
    constraints = []
    for property in filtering_properties[obj_type]:
        value = kwargs[property]
        if not value:
            continue
        filter = kwargs.get(f"form[{property}_filter]")
        if filter == "equality":
            constraint = getattr(model, property) == value
        elif filter == "inclusion" or DIALECT == "sqlite":
            constraint = getattr(model, property).contains(value)
        else:
            operator = "regexp" if DIALECT == "mysql" else "~"
            constraint = getattr(model, property).op(operator)(value)
        constraints.append(constraint)
    result = Session.query(model).filter(and_(*constraints))
    pools = [int(id) for id in kwargs["pools"]]
    if pools:
        result = result.filter(model.pools.any(models["pool"].id.in_(pools)))
    return [d.view_properties for d in result.all()]
def run(self, payload=None):
    self.init_state()
    self.run_state["status"] = "Running"
    if payload is None:
        payload = self.service.initial_payload
    try:
        app.service_db[self.service.id]["runs"] += 1
        Session.commit()
        results = self.device_run(payload)
    except Exception:
        result = (
            f"Running {self.service.type} '{self.service.name}'"
            " raised the following exception:\n"
            f"{chr(10).join(format_exc().splitlines())}\n\n"
            "Run aborted..."
        )
        self.log("error", result)
        results = {"success": False, "runtime": self.runtime, "result": result}
    finally:
        Session.commit()
        self.status = "Aborted" if self.stop else "Completed"
        self.run_state["status"] = self.status
        if self.run_state["success"] is not False:
            self.run_state["success"] = results["success"]
        app.service_db[self.service.id]["runs"] -= 1
        results["endtime"] = self.endtime = app.get_time()
        results["logs"] = app.run_logs.pop(self.runtime, None)
        if self.parent_runtime == self.runtime:
            self.state = results["state"] = app.run_db.pop(self.parent_runtime)
        if self.task and not self.task.frequency:
            self.task.is_active = False
        results["properties"] = {
            "run": self.properties,
            "service": self.service.get_properties(exclude=["positions"]),
        }
        if self.send_notification:
            results = self.notify(results)
        if self.push_to_git:
            self.git_push(results)
        self.create_result(results)
        Session.commit()
    return results
def clear_results(self, job_id: int) -> None:
    for result in fetch("Run", all_matches=True, allow_none=True, job_id=job_id):
        Session.delete(result)
def delete(self, cls: str, name: str) -> dict:
    result = delete(cls, name=name)
    Session.commit()
    return result