def compute_pool(self):
    if self.manually_defined:
        return
    self.devices = list(filter(self.object_match, db.fetch_all("device")))
    self.device_number = len(self.devices)
    self.links = list(filter(self.object_match, db.fetch_all("link")))
    self.link_number = len(self.links)
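# Every compute_pool variant in this section delegates per-object matching to
# an object_match helper. A minimal sketch of one plausible implementation,
# assuming each pool stores, per filterable property, a pattern and a match
# mode (equality / inclusion / regex); the "_match" suffix and the default
# mode are illustrative assumptions, not the actual API:
from re import search as regex_search

def object_match(self, obj):
    match_functions = {
        "equality": lambda value, pattern: value == pattern,
        "inclusion": lambda value, pattern: pattern in value,
        "regex": lambda value, pattern: bool(regex_search(pattern, value)),
    }
    results = []
    for prop in properties["filtering"][obj.class_type]:
        pattern = getattr(self, f"{obj.class_type}_{prop}")
        if not pattern:
            continue  # an unset pool property does not constrain the match
        match_mode = getattr(self, f"{obj.class_type}_{prop}_match", "inclusion")
        results.append(match_functions[match_mode](str(getattr(obj, prop)), pattern))
    return all(results)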
def test_device_deletion(user_client):
    create_from_file(user_client, "europe.xls")
    for device_name in routers:
        device = db.fetch("device", name=device_name)
        user_client.post(f"/delete_instance/device/{device.id}")
    assert len(db.fetch_all("device")) == 18
    assert len(db.fetch_all("link")) == 18
def test_link_deletion(user_client):
    create_from_file(user_client, "europe.xls")
    for link_name in links:
        link = db.fetch("link", name=link_name)
        user_client.post(f"/delete_instance/link/{link.id}")
    assert len(db.fetch_all("device")) == 33
    assert len(db.fetch_all("link")) == 38
def test_pool_management(user_client):
    create_from_file(user_client, "europe.xls")
    pool_number = len(db.fetch_all("pool"))
    user_client.post("/update/pool", data=create_pool(pool1))
    user_client.post("/update/pool", data=create_pool(pool2))
    p1, p2 = db.fetch("pool", name="pool1"), db.fetch("pool", name="pool2")
    assert len(p1.devices) == 21
    assert len(p1.links) == 20
    assert len(p2.devices) == 12
    assert len(p2.links) == 4
    assert len(db.fetch_all("pool")) == pool_number + 2
def test_user_management(user_client):
    for user in ("user1", "user2", "user3"):
        dict_user = {
            "form_type": "user",
            "name": user,
            "email": f"{user}@test.com",
            "group": "Admin",
            "password": user,
        }
        user_client.post("/update/user", data=dict_user)
    assert len(db.fetch_all("user")) == 4
    user1 = db.fetch("user", name="user1")
    user_client.post(f"/delete_instance/user/{user1.id}")
    assert len(db.fetch_all("user")) == 3
def test_user_management(user_client):
    for user in ("user1", "user2", "user3"):
        dict_user = {
            "form_type": "user",
            "name": user,
            "email": f"{user}@test.com",
            "password": user,
            "authentication": "database",
            "theme": "dark",
        }
        user_client.post("/update/user", data=dict_user)
    assert len(db.fetch_all("user")) == 8
    user1 = db.fetch("user", name="user1")
    user_client.post(f"/delete_instance/user/{user1.id}")
    assert len(db.fetch_all("user")) == 7
def update_database_configurations_from_git(self):
    for dir in scandir(self.path / "network_data"):
        device = db.fetch("device", allow_none=True, name=dir.name)
        timestamp_path = Path(dir.path) / "timestamps.json"
        if not device:
            continue
        try:
            with open(timestamp_path) as file:
                # load is presumably json.load, given the .json source file
                timestamps = load(file)
        except Exception:
            timestamps = {}
        for property in self.configuration_properties:
            for timestamp, value in timestamps.get(property, {}).items():
                setattr(device, f"last_{property}_{timestamp}", value)
            filepath = Path(dir.path) / property
            if not filepath.exists():
                continue
            with open(filepath) as file:
                setattr(device, property, file.read())
    db.session.commit()
    for pool in db.fetch_all("pool"):
        if any(
            getattr(pool, f"device_{property}")
            for property in self.configuration_properties
        ):
            pool.compute_pool()
def delete_corrupted_edges(self):
    edges, duplicated_edges = db.fetch_all("workflow_edge"), defaultdict(list)
    number_of_corrupted_edges = 0
    for edge in edges:
        services = getattr(edge.workflow, "services", [])
        if (
            not edge.source
            or not edge.destination
            or not edge.workflow
            or edge.source not in services
            or edge.destination not in services
        ):
            db.session.delete(edge)
            number_of_corrupted_edges += 1
    db.session.commit()
    for edge in edges:
        duplicated_edges[
            (
                edge.source.name,
                edge.destination.name,
                edge.workflow.name,
                edge.subtype,
            )
        ].append(edge)
    for duplicates in duplicated_edges.values():
        for duplicate in duplicates[1:]:
            db.session.delete(duplicate)
            number_of_corrupted_edges += 1
    return number_of_corrupted_edges
def get_service_state(self, path, runtime=None):
    service_id, state = path.split(">")[-1], None
    service = db.fetch("service", id=service_id, allow_none=True)
    if not service:
        raise db.rbac_error
    runs = db.fetch_all("run", service_id=service_id)
    if not runtime:
        runtime = "latest"
    if runs and runtime != "normal":
        if runtime == "latest":
            runtime = runs[-1].parent_runtime
        latest_runs = [r for r in runs if r.parent_runtime == runtime]
        if latest_runs:
            state = latest_runs[0].get_state()
    return {
        "service": service.to_dict(include=["services", "edges", "superworkflow"]),
        "runtimes": sorted(
            set(
                (run.parent_runtime, f"{run.parent_runtime} ({run.creator})")
                for run in runs
            ),
            reverse=True,
        ),
        "state": state,
        "runtime": runtime,
    }
def update(self, **kwargs):
    relation = vs.relationships[self.__tablename__]
    for property, value in kwargs.items():
        if not hasattr(self, property):
            continue
        property_type = vs.model_properties[self.__tablename__].get(property, None)
        if property in relation:
            if relation[property]["list"]:
                value = db.objectify(relation[property]["model"], value)
            elif value:
                value = db.fetch(relation[property]["model"], id=value)
        if property_type == "bool":
            value = value not in (False, "false")
        setattr(self, property, value)
    if not kwargs.get("update_pools") or not self.pool_model:
        return
    for pool in db.fetch_all("pool"):
        if pool.manually_defined:
            continue
        match = pool.match_instance(self)
        if match and self not in getattr(pool, f"{self.class_type}s"):
            getattr(pool, f"{self.class_type}s").append(self)
        if self in getattr(pool, f"{self.class_type}s") and not match:
            getattr(pool, f"{self.class_type}s").remove(self)
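# Illustrative call into the generic update() above: scalar properties are set
# directly, relationship values are resolved through db.objectify / db.fetch,
# and update_pools=True triggers the pool reconciliation loop at the end.
# "gateway" is a hypothetical scalar relationship passed by id:
device = db.fetch("device", name="Washington")
device.update(update_pools=True, vendor="Cisco", gateway=15)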
def get_device_logs(self, device_id):
    # Fetch the device once instead of re-fetching it for every log entry.
    ip_address = db.fetch("device", id=device_id).ip_address
    device_logs = [
        log.name for log in db.fetch_all("log") if log.source == ip_address
    ]
    return "\n".join(device_logs)
def topology_import(self, file):
    # open_workbook, XLRDError and sheet_by_name belong to the xlrd API
    book = open_workbook(file_contents=file.read())
    status = "Topology successfully imported."
    for obj_type in ("device", "link"):
        try:
            sheet = book.sheet_by_name(obj_type)
        except XLRDError:
            continue
        properties = sheet.row_values(0)
        for row_index in range(1, sheet.nrows):
            values = {}
            for index, property in enumerate(properties):
                if not property:
                    continue
                func = db.field_conversion[property_types.get(property, "str")]
                values[property] = func(sheet.row_values(row_index)[index])
            try:
                db.factory(obj_type, **values).serialized
            except Exception as exc:
                info(f"{values} could not be imported ({exc})")
                status = "Partial import (see logs)."
        db.session.commit()
    for pool in db.fetch_all("pool"):
        pool.compute_pool()
    self.log("info", status)
    return status
def test_rest_api_basic(user_client: FlaskClient):
    number_of_devices = len(db.fetch_all("device"))
    post(
        "http://192.168.105.2:5000/rest/instance/device",
        json={"name": "new_router", "model": "Cisco"},
        auth=HTTPBasicAuth("admin", "admin"),
    )
    assert len(db.fetch_all("device")) == number_of_devices + 1
    result = get(
        "http://192.168.105.2:5000/rest/instance/device/Washington",
        auth=HTTPBasicAuth("admin", "admin"),
    ).json()
    assert result["model"] == "Arista"
    post(
        "http://192.168.105.2:5000/rest/instance/device",
        json={"name": "Washington", "model": "Cisco"},
        auth=HTTPBasicAuth("admin", "admin"),
    )
    result = get(
        "http://192.168.105.2:5000/rest/instance/device/Washington",
        auth=HTTPBasicAuth("admin", "admin"),
    ).json()
    assert result["model"] == "Cisco"
    result = get(
        "http://192.168.105.2:5000/rest/instance/service/get_facts",
        auth=HTTPBasicAuth("admin", "admin"),
    ).json()
    assert result["description"] == "Getter: get_facts"
    number_of_workflows = len(db.fetch_all("workflow"))
    result = post(
        "http://192.168.105.2:5000/rest/instance/workflow",
        json={"name": "new_workflow", "description": "New"},
        auth=HTTPBasicAuth("admin", "admin"),
    ).json()
    assert (
        result["description"] == "New"
        and len(db.fetch_all("workflow")) == number_of_workflows + 1
    )
def handle(self):
    address = self.client_address[0]
    device = db.fetch("device", allow_none=True, ip_address=address)
    properties = {
        "source": device.name if device else address,
        "content": self.request[0].strip().decode(),
    }
    for event in db.fetch_all("event"):
        event.match_log(**properties)
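# handle() above has the shape of a standard-library socketserver UDP handler:
# for UDP servers, self.request[0] is the raw datagram and self.client_address
# the (host, port) of the sender. A minimal wiring sketch, assuming the method
# lives on a handler class; the bind address and syslog port are placeholders:
from socketserver import BaseRequestHandler, ThreadingUDPServer

class SyslogHandler(BaseRequestHandler):
    def handle(self):
        ...  # body as defined above

server = ThreadingUDPServer(("0.0.0.0", 514), SyslogHandler)
server.serve_forever()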
def compute_pool(self):
    if not self.manually_defined:
        for object_type in ("device", "link"):
            objects = (
                list(filter(self.object_match, db.fetch_all(object_type)))
                if self.compute(object_type)
                else []
            )
            setattr(self, f"{object_type}s", objects)
    self.device_number = len(self.devices)
    self.link_number = len(self.links)
def compute_pool(self):
    for model in self.models:
        if not self.manually_defined:
            instances = (
                list(filter(self.object_match, db.fetch_all(model)))
                if self.compute(model)
                else []
            )
            setattr(self, f"{model}s", instances)
        else:
            instances = getattr(self, f"{model}s")
        setattr(self, f"{model}_number", len(instances))
def search_workflow_services(self, *args, **kwargs):
    return [
        "standalone",
        "shared",
        *[
            workflow.name
            for workflow in db.fetch_all("workflow")
            if any(
                kwargs["str"].lower() in service.scoped_name.lower()
                for service in workflow.services
            )
        ],
    ]
def cluster_monitoring(self, run, payload):
    protocol = app.settings["cluster"]["scan_protocol"]
    for instance in db.fetch_all("instance"):
        db.factory(
            "instance",
            **get(
                f"{protocol}://{instance.ip_address}/rest/is_alive",
                timeout=app.settings["cluster"]["scan_timeout"],
            ).json(),
        )
    return {"success": True}
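# cluster_monitoring above assumes each peer's /rest/is_alive endpoint returns
# a JSON object of instance properties that db.factory can ingest directly,
# e.g. (illustrative payload, not a documented schema):
# {"name": "server-2", "ip_address": "192.168.105.3", "status": "Up"}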
def compute_pool(self):
    if self.manually_defined:
        return
    for object_type in ("device", "link"):
        objects = (
            list(filter(self.object_match, db.fetch_all(object_type)))
            if any(
                getattr(self, f"{object_type}_{property}")
                for property in properties["filtering"][object_type]
            )
            else []
        )
        setattr(self, f"{object_type}s", objects)
        setattr(self, f"{object_type}_number", len(objects))
    self.update_rbac()
def calendar_init(self, type):
    results = {}
    for instance in db.fetch_all(type):
        if getattr(instance, "workflow", None):
            continue
        date = getattr(instance, "next_run_time" if type == "task" else "runtime")
        if date:
            results[instance.name] = {
                "start": self.convert_date(date),
                **instance.serialized,
            }
    return results
def count_models(self):
    return {
        "counters": {
            model: db.query(model).count() for model in properties["dashboard"]
        },
        "properties": {
            model: Counter(
                str(getattr(instance, properties["dashboard"][model][0]))
                for instance in db.fetch_all(model)
            )
            for model in properties["dashboard"]
        },
    }
def count_models(self):
    return {
        "counters": {
            instance_type: db.count(instance_type)
            for instance_type in properties["dashboard"]
        },
        "properties": {
            instance_type: Counter(
                str(getattr(instance, properties["dashboard"][instance_type][0]))
                for instance in db.fetch_all(instance_type)
            )
            for instance_type in properties["dashboard"]
        },
    }
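# Shape of the dictionary returned by the two count_models() variants above,
# with made-up numbers: "counters" maps every dashboard model to its row
# count, "properties" to a Counter over the first dashboard property of each
# instance:
{
    "counters": {"device": 33, "link": 38},
    "properties": {
        "device": Counter({"Cisco": 20, "Arista": 13}),
        "link": Counter({"ethernet": 38}),
    },
}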
def update(self, **kwargs):
    super().update(**kwargs)
    if kwargs.get("dont_update_pools", False):
        return
    for pool in db.fetch_all("pool"):
        if pool.manually_defined or not pool.compute(self.class_type):
            continue
        match = pool.object_match(self)
        relation, number = f"{self.class_type}s", f"{self.class_type}_number"
        if match and self not in getattr(pool, relation):
            getattr(pool, relation).append(self)
            setattr(pool, number, getattr(pool, number) + 1)
        if self in getattr(pool, relation) and not match:
            getattr(pool, relation).remove(self)
            setattr(pool, number, getattr(pool, number) - 1)
def export_topology(self, **kwargs):
    # Workbook, add_sheet and save belong to the xlwt API
    workbook = Workbook()
    filename = kwargs["export_filename"]
    if "." not in filename:
        filename += ".xls"
    for obj_type in ("device", "link"):
        sheet = workbook.add_sheet(obj_type)
        for index, property in enumerate(model_properties[obj_type]):
            if property in db.dont_migrate[obj_type]:
                continue
            sheet.write(0, index, property)
            for obj_index, obj in enumerate(db.fetch_all(obj_type), 1):
                value = getattr(obj, property)
                if isinstance(value, bytes):
                    value = str(self.decrypt(value), "utf-8")
                sheet.write(obj_index, index, str(value))
    workbook.save(self.path / "files" / "spreadsheets" / filename)
def update_database_configurations_from_git(self):
    for dir in scandir(self.path / "network_data"):
        device = db.fetch("device", allow_none=True, name=dir.name)
        if not device:
            continue
        with open(Path(dir.path) / "data.yml") as data:
            # safe_load replaces the deprecated loaderless yaml.load and
            # refuses to execute arbitrary YAML tags
            parameters = yaml.safe_load(data)
        device.update(**{"dont_update_pools": True, **parameters})
        for data in ("configuration", "operational_data"):
            filepath = Path(dir.path) / data
            if not filepath.exists():
                continue
            with open(filepath) as file:
                setattr(device, data, file.read())
    db.session.commit()
    for pool in db.fetch_all("pool"):
        if pool.device_configuration or pool.device_operational_data:
            pool.compute_pool()
def get_service_state(self, path, runtime=None):
    service_id = path.split(">")[-1]
    state, service = None, db.fetch("service", id=service_id)
    runs = db.fetch_all("run", service_id=service_id)
    if not runtime:
        runtime = "latest"
    if runs and runtime != "normal":
        if runtime == "latest":
            runtime = runs[-1].parent_runtime
        state = db.fetch("run", runtime=runtime).get_state()
    return {
        "service": service.to_dict(include=["services", "edges", "superworkflow"]),
        "runtimes": sorted(set((r.parent_runtime, r.creator) for r in runs)),
        "state": state,
        "runtime": runtime,
    }
def calendar_init(self, type):
    results = {}
    for instance in db.fetch_all(type):
        if getattr(instance, "workflow", None):
            continue
        date = getattr(instance, "next_run_time" if type == "task" else "runtime")
        python_month = search(r".*-(\d{2})-.*", date)
        if not python_month:
            continue
        month = "{:02}".format((int(python_month.group(1)) - 1) % 12)
        start = [
            int(i)
            for i in sub(
                r"(\d+)-(\d+)-(\d+) (\d+):(\d+).*",
                r"\1," + month + r",\3,\4,\5",
                date,
            ).split(",")
        ]
        results[instance.name] = {"start": start, **instance.serialized}
    return results
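# Worked example of the date rewriting above, factored into a standalone
# helper (hypothetical name) for clarity. The month is decremented because
# JavaScript calendar widgets index months from 0:
from re import search, sub

def to_calendar_start(date):
    month = "{:02}".format((int(search(r".*-(\d{2})-.*", date).group(1)) - 1) % 12)
    return [
        int(i)
        for i in sub(
            r"(\d+)-(\d+)-(\d+) (\d+):(\d+).*", r"\1," + month + r",\3,\4,\5", date
        ).split(",")
    ]

assert to_calendar_start("2022-03-14 09:30:00") == [2022, 2, 14, 9, 30]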
def delete_corrupted_edges(self):
    edges, duplicated_edges = db.fetch_all("workflow_edge"), defaultdict(list)
    for edge in edges:
        duplicated_edges[
            (
                edge.source.name,
                edge.destination.name,
                edge.workflow.name,
                edge.subtype,
            )
        ].append(edge)
    number_of_corrupted_edges = 0
    for duplicates in duplicated_edges.values():
        if len(duplicates) == 1:
            continue
        for duplicate in duplicates[1:]:
            db.session.delete(duplicate)
            number_of_corrupted_edges += 1
    for edge in edges:
        services = edge.workflow.services
        if edge.source not in services or edge.destination not in services:
            db.session.delete(edge)
            number_of_corrupted_edges += 1
    return number_of_corrupted_edges
def test_create_logs(user_client):
    number_of_logs = len(db.fetch_all("changelog"))
    for i in range(10):
        env.log("warning", str(i))
    db.session.commit()
    assert len(db.fetch_all("changelog")) == number_of_logs + 11
def test_netmiko_napalm_config(user_client):
    create_from_file(user_client, "europe.xls")
    user_client.post("/update/task", data=instant_task)
    assert len(db.fetch_all("task")) == 4
    user_client.post("/update/task", data=scheduled_task)
    assert len(db.fetch_all("task")) == 5