def route(page):
    """Dispatch a POST request to the application method named by *page*.

    ``page`` has the form "<endpoint>/<arg1>/<arg2>/...": the first segment
    selects the handler method on ``app``, the remaining segments are passed
    as positional arguments.  Enforces RBAC, validates forms, commits the
    session, and converts exceptions into JSON alerts.
    """
    endpoint, *args = page.split("/")
    if f"/{endpoint}" not in app.rbac["endpoints"]["POST"]:
        return jsonify({"alert": "Invalid POST request."})
    # NOTE(review): this denies access when the endpoint IS in the group's
    # POST list — confirm the group list holds forbidden (not allowed)
    # endpoints; otherwise this condition should be `not in`.
    if f"/{endpoint}" in app.rbac["groups"][current_user.group]["POST"]:
        return jsonify({"alert": "Error 403 Forbidden."})
    form_type = request.form.get("form_type")
    if endpoint in app.json_endpoints:
        result = getattr(app, endpoint)(*args, **request.json)
    elif form_type:
        form = form_classes[form_type](request.form)
        if not form.validate_on_submit():
            return jsonify({"invalid_form": True, **{"errors": form.errors}})
        result = getattr(app, endpoint)(
            *args, **form_postprocessing(form, request.form)
        )
    else:
        result = getattr(app, endpoint)(*args)
    try:
        Session.commit()
        return jsonify(result)
    except Exception as exc:
        # Fix: the original re-raised immediately ("raise exc"), which made
        # the rollback, debug-mode re-raise and user-facing alert below
        # unreachable dead code.
        Session.rollback()
        if app.settings["app"]["config_mode"] == "debug":
            raise
        return jsonify({"alert": handle_exception(str(exc))})
def duplicate_workflow(self, workflow_id: int, **kwargs: Any) -> dict:
    """Clone the workflow identified by *workflow_id*.

    A new workflow is created from *kwargs*; every job of the original is
    attached to it (each job's canvas position is copied under the clone's
    name), all edges are recreated, and the serialized clone is returned.
    """
    original = fetch("Workflow", id=workflow_id)
    clone = factory("Workflow", **kwargs)
    Session.commit()
    for job in original.jobs:
        clone.jobs.append(job)
        # Jobs keep one position per workflow name they belong to.
        job.positions[clone.name] = job.positions[original.name]
    Session.commit()
    for edge in original.edges:
        edge_name = (
            f"{clone.id}-{edge.subtype}:"
            f"{edge.source.id}->{edge.destination.id}"
        )
        clone.edges.append(
            factory(
                "WorkflowEdge",
                name=edge_name,
                workflow=clone.id,
                subtype=edge.subtype,
                source=edge.source.id,
                destination=edge.destination.id,
            )
        )
    return clone.serialized
def copy_service_in_workflow(self, workflow_id, **kwargs):
    """Insert existing services into a workflow.

    kwargs["services"] is a comma-separated list of service IDs and
    kwargs["mode"] is either "shallow" (share the service instance) or
    "deep" (duplicate it into the workflow).  Returns the serialized
    inserted services plus the workflow's new modification time, or
    {"error": [...]} when shallow-mode validation fails.
    """
    unique_ids = list(set(kwargs["services"].split(",")))
    candidates = objectify("service", unique_ids)
    workflow = fetch("workflow", id=workflow_id)
    inserted, errors = [], []
    if kwargs["mode"] == "shallow":
        # Only shared services may be inserted by reference, and only once.
        for candidate in candidates:
            if not candidate.shared:
                errors.append(f"'{candidate.name}' is not a shared service.")
            elif candidate in workflow.services:
                errors.append(f"This workflow already contains '{candidate.name}'.")
    if errors:
        return {"error": errors}
    for candidate in candidates:
        if kwargs["mode"] == "deep":
            candidate = candidate.duplicate(workflow)
        else:
            workflow.services.append(candidate)
        inserted.append(candidate)
    workflow.last_modified = self.get_time()
    Session.commit()
    return {
        "services": [svc.serialized for svc in inserted],
        "update_time": workflow.last_modified,
    }
def duplicate_workflow(self, workflow_id, **kwargs):
    """Clone the workflow identified by *workflow_id*.

    Builds a new workflow from *kwargs*, shares every service of the
    original with it (copying the canvas position under the clone's name),
    recreates every edge, and returns the serialized clone.
    """
    original = fetch("workflow", id=workflow_id)
    clone = factory("workflow", **kwargs)
    Session.commit()
    for service in original.services:
        clone.services.append(service)
        # Services keep one position per workflow name they belong to.
        service.positions[clone.name] = service.positions[original.name]
    Session.commit()
    for edge in original.edges:
        edge_name = (
            f"{clone.id}-{edge.subtype}:"
            f"{edge.source.id}->{edge.destination.id}"
        )
        clone.edges.append(
            factory(
                "workflow_edge",
                name=edge_name,
                workflow=clone.id,
                subtype=edge.subtype,
                source=edge.source.id,
                destination=edge.destination.id,
            )
        )
    return clone.serialized
def query_opennms(self, **kwargs: str) -> None:
    """Import devices from an OpenNMS server into the inventory.

    Pulls the node list from the OpenNMS REST API, maps each node's asset
    record onto device properties, then resolves the primary SNMP interface
    of each node to fill in its IP address and create the Device.
    """
    login, password = self.opennms_login, kwargs["password"]
    Session.commit()
    auth = (login, password)
    headers = {"Accept": "application/json"}
    nodes = http_get(
        self.opennms_devices, headers=headers, auth=auth
    ).json()["node"]
    devices = {}
    for node in nodes:
        asset = node["assetRecord"]
        devices[node["id"]] = {
            "name": node.get("label", node["id"]),
            "description": asset.get("description", ""),
            "location": asset.get("building", ""),
            "vendor": asset.get("manufacturer", ""),
            "model": asset.get("modelNumber", ""),
            "operating_system": node.get("operatingSystem", ""),
            "os_version": asset.get("sysDescription", ""),
            "longitude": asset.get("longitude", 0.0),
            "latitude": asset.get("latitude", 0.0),
        }
    for device_id in list(devices):
        interfaces = http_get(
            f"{self.opennms_rest_api}/nodes/{device_id}/ipinterfaces",
            headers=headers,
            auth=auth,
        ).json()
        for interface in interfaces["ipInterface"]:
            # "P" flags the primary SNMP interface of the node.
            if interface["snmpPrimary"] == "P":
                devices[device_id]["ip_address"] = interface["ipAddress"]
                factory("Device", **devices[device_id])
def import_jobs(self, **kwargs: Any) -> None:
    """Import previously exported jobs from the projects folder.

    kwargs["jobs_to_import"] lists the file names to import.  Standalone
    services are YAML files under exported_jobs/services; workflows are tar
    archives that are extracted, imported, then cleaned up.
    """
    jobs = kwargs["jobs_to_import"]
    path = self.path / "projects" / "exported_jobs"
    for file in scandir(path / "services"):
        if file.name == ".gitkeep" or file.name not in jobs:
            continue
        with open(file.path, "r") as instance_file:
            # SECURITY: yaml.load without an explicit Loader can execute
            # arbitrary Python from the file — prefer yaml.safe_load (or
            # Loader=SafeLoader) if the exported files permit it.
            instance = yaml.load(instance_file)
            model = instance.pop("type")
            factory(model, **self.objectify(model, instance))
    Session.commit()
    for workflow in listdir(path / "workflows"):
        if workflow == ".gitkeep" or workflow not in jobs:
            continue
        workflow_name = workflow.split(".")[0]
        with open_tar(path / "workflows" / workflow) as tar_file:
            tar_file.extractall(path=path / "workflows")
            # Import order matters: jobs first, then the workflow that
            # references them, then the edges that reference both.
            for instance_type in ("jobs", "workflow", "edges"):
                path_job = path / "workflows" / workflow_name / instance_type
                for file in scandir(path_job):
                    with open(path_job / file.name, "r") as instance_file:
                        # SECURITY: same yaml.load concern as above.
                        instance = yaml.load(instance_file)
                        model = instance.pop("type")
                        factory(model, **self.objectify(model, instance))
        Session.commit()
        # Remove the extracted archive contents once imported.
        rmtree(path / "workflows" / workflow_name)
def clean_database(self):
    """Mark runs left in "Running" state (e.g. by a crash/reload) as aborted."""
    stale_runs = fetch("run", all_matches=True, allow_none=True, status="Running")
    for stale_run in stale_runs:
        stale_run.status = "Aborted (app reload)"
    Session.commit()
def end_run(
    self,
    runtime: str,
    results: dict,
    parent: Optional["Job"],
    task: Optional["Task"],
) -> None:
    """Tear down after a job run.

    Closes cached Netmiko/Napalm connections for this job, logs completion
    on the top-level job, resets run state, deactivates one-shot tasks, and
    (for top-level runs) commits and sends the notification.
    """
    for library in ("netmiko", "napalm"):
        connections = controller.connections_cache[library].pop(self.name, None)
        if not connections:
            continue
        for device, conn in connections.items():
            info(f"Closing Netmiko Connection to {device}")
            if library == "netmiko":
                conn.disconnect()
            else:
                conn.close()
    current_job = parent or self
    current_job.logs.append(f"{self.type} {self.name}: Finished.")
    self.is_running, self.state = False, {}
    self.completed = self.failed = 0
    if task and not task.frequency:
        # A task without a frequency runs once; deactivate it now.
        task.is_active = False
    if not parent:
        Session.commit()
    if not parent and self.send_notification:
        self.notify(results, runtime)
def job(self, payload: dict, device: Device, parent: Optional[Job] = None) -> dict:
    """Fetch *device*'s configuration via Napalm and archive it.

    Stores the configuration both in ``device.configurations`` (keyed by
    timestamp, trimmed to ``self.number_of_configuration`` entries) and as a
    file under git/configurations/<device>/.  Returns a success/result dict;
    "no change" short-circuits when the config matches the last snapshot.
    """
    try:
        now = datetime.now()
        path_configurations = Path.cwd() / "git" / "configurations"
        path_device_config = path_configurations / device.name
        path_device_config.mkdir(parents=True, exist_ok=True)
        napalm_connection = self.napalm_connection(device, parent)
        self.logs.append(
            f"Fetching configuration on {device.name} (Napalm)")
        config = controller.str_dict(napalm_connection.get_config())
        device.last_status = "Success"
        device.last_runtime = (datetime.now() - now).total_seconds()
        if device.configurations:
            # Compare against the most recent snapshot (max timestamp key).
            last_config = device.configurations[max(device.configurations)]
            if config == last_config:
                return {"success": True, "result": "no change"}
        device.configurations[str(
            now)] = device.current_configuration = config
        with open(path_device_config / device.name, "w") as file:
            file.write(config)
        device.last_update = str(now)
        self.generate_yaml_file(path_device_config, device)
    except Exception as e:
        # NOTE(review): if the failure happens before path_device_config is
        # assigned (first statements of the try), this handler would itself
        # raise NameError — confirm that is acceptable here.
        device.last_status = "Failure"
        device.last_failure = str(now)
        self.generate_yaml_file(path_device_config, device)
        return {"success": False, "result": str(e)}
    if len(device.configurations) > self.number_of_configuration:
        # Evict the oldest snapshot to honor the retention limit.
        device.configurations.pop(min(device.configurations))
    Session.commit()
    return {"success": True, "result": "Get Config via Napalm"}
def handoffssh(self, id, **kwargs):
    """Open a web-based SSH session to a device.

    Chooses credentials from the device record, the logged-in user, or the
    explicit username/password in *kwargs*, records a session entry, then
    launches the SSH bridge in a background thread.  Returns the connection
    parameters for the client, or {"error": ...} on failure.
    """
    device = fetch("device", id=id)
    credential_source = kwargs["credentials"]
    if credential_source == "device":
        credentials = (device.username, device.password)
    elif credential_source == "user":
        credentials = (current_user.name, current_user.password)
    else:
        credentials = (kwargs["username"], kwargs["password"])
    uuid, port = str(uuid4()), self.get_ssh_port()
    session = factory(
        "session",
        name=uuid,
        user=current_user.name,
        timestamp=self.get_time(),
        device=device.id,
    )
    Session.commit()
    try:
        ssh_connection = SshConnection(
            device.ip_address, *credentials, session.id, uuid, port
        )
        Thread(
            target=ssh_connection.start_session,
            args=(session.id, uuid, port),
        ).start()
        return {
            "port": port,
            "username": uuid,
            "device_name": device.name,
            "device_ip": device.ip_address,
        }
    except Exception as exc:
        return {"error": exc.args}
def topology_import(self, file):
    """Import devices and links from an Excel workbook.

    Reads the "device" and "link" sheets (missing sheets are skipped); the
    first row holds property names, each following row one object.  Objects
    that fail to import are logged and the status downgraded to a partial
    import.  Pools are recomputed once everything is loaded.
    """
    book = open_workbook(file_contents=file.read())
    status = "Topology successfully imported."
    for obj_type in ("device", "link"):
        try:
            sheet = book.sheet_by_name(obj_type)
        except XLRDError:
            continue
        headers = sheet.row_values(0)
        for row_index in range(1, sheet.nrows):
            row = sheet.row_values(row_index)
            # Pools are recomputed in one pass at the end instead.
            values = {"dont_update_pools": True}
            for index, prop in enumerate(headers):
                if not prop:
                    continue
                convert = field_conversion[property_types.get(prop, "str")]
                values[prop] = convert(row[index])
            try:
                factory(obj_type, **values).serialized
            except Exception as exc:
                info(f"{str(values)} could not be imported ({str(exc)})")
                status = "Partial import (see logs)."
        Session.commit()
    for pool in fetch_all("pool"):
        pool.compute_pool()
    self.log("info", status)
    return status
def get_results(
    self,
    payload: dict,
    device: Optional["Device"] = None,
    parent: Optional["Job"] = None,
) -> Tuple[dict, list]:
    """Run this service (optionally against one device) and capture logs.

    Returns ``(results, logs)``.  If the job raises, ``results`` is a
    failure dict whose "result" holds the formatted traceback.
    """
    logs = []
    try:
        if device:
            log = f"Running {self.type} on {device.name}."
            info(log)
            logs.append(log)
            results = self.job(payload, device, parent)
            success = "SUCCESS" if results["success"] else "FAILURE"
            logs.append(
                f"Finished running service on {device.name}. ({success})")
        else:
            info(f"Running {self.type}")
            results = self.job(payload, parent)
    except Exception:
        if device:
            logs.append(
                f"Finished running service on {device.name}. (FAILURE)")
        results = {
            "success": False,
            # chr(10) is "\n": re-join the traceback into one string.
            "result": chr(10).join(format_exc().splitlines()),
        }
    if not parent and not self.multiprocessing:
        self.completed += 1
        # Bool arithmetic: adds 1 when success is False (1 - True == 0).
        self.failed += 1 - results["success"]
        self.logs.extend(logs)
        Session.commit()
    return results, logs
def job(self, run: "Run", payload: dict, device: Device) -> dict:
    """Fetch *device*'s configuration via Netmiko and archive it.

    Same archival scheme as the Napalm variant: snapshots keyed by
    timestamp in ``device.configurations`` (trimmed to
    ``self.number_of_configuration`` entries) plus a file under
    git/configurations/<device>/.  Returns a success/result dict; "no
    change" short-circuits when the config matches the last snapshot.
    """
    try:
        now = datetime.now()
        path_configurations = Path.cwd() / "git" / "configurations"
        path_device_config = path_configurations / device.name
        path_device_config.mkdir(parents=True, exist_ok=True)
        netmiko_connection = run.netmiko_connection(device)
        try:
            # Best effort: some platforms have no enable mode.
            netmiko_connection.enable()
        except Exception:
            pass
        run.log("info", f"Fetching configuration on {device.name} (Netmiko)")
        command = run.configuration_command
        config = netmiko_connection.send_command(command)
        device.last_status = "Success"
        device.last_runtime = (datetime.now() - now).total_seconds()
        if device.configurations:
            # Compare against the most recent snapshot (max timestamp key).
            last_config = device.configurations[max(device.configurations)]
            if config == last_config:
                return {"success": True, "result": "no change"}
        device.configurations[str(now)] = device.current_configuration = config
        with open(path_device_config / device.name, "w") as file:
            file.write(config)
        device.last_update = str(now)
        self.generate_yaml_file(path_device_config, device)
    except Exception as e:
        # NOTE(review): if the failure happens before path_device_config is
        # assigned, this handler would itself raise NameError — confirm.
        device.last_status = "Failure"
        device.last_failure = str(now)
        self.generate_yaml_file(path_device_config, device)
        return {"success": False, "result": str(e)}
    if len(device.configurations) > self.number_of_configuration:
        # Evict the oldest snapshot to honor the retention limit.
        device.configurations.pop(min(device.configurations))
    Session.commit()
    return {"success": True, "result": f"Command: {command}"}
def build_results(
    self,
    payload: Optional[dict] = None,
    targets: Optional[Set["Device"]] = None,
    parent: Optional["Job"] = None,
    origin: Optional["Job"] = None,
) -> dict:
    """Execute the workflow graph and aggregate per-job results.

    Traverses jobs from *origin* (or the workflow's first job), running a
    job only once all of its prerequisite predecessors have run, and
    follows success/failure edges to pick successors.  With
    ``use_workflow_targets``, the allowed-device sets are threaded from job
    to job and overall success means every start device reached "End".
    """
    self.state = {"jobs": {}}
    start_job = origin or self.jobs[0]
    jobs: list = [start_job]
    visited: Set = set()
    results: dict = {"results": payload or {}, "success": False}
    allowed_devices: dict = defaultdict(set)
    if self.use_workflow_targets:
        allowed_devices[
            start_job.name] = targets or self.compute_devices(payload)
    while jobs:
        job = jobs.pop()
        # Defer this job until every prerequisite predecessor has run.
        if any(node not in visited for node in job.adjacent_jobs(
                self, "source", "prerequisite")):
            continue
        visited.add(job)
        self.state["current_job"] = job.get_properties()
        Session.commit()
        valid_devices = self.compute_valid_devices(job, allowed_devices,
                                                   results["results"])
        job_results, _ = job.run(results["results"],
                                 targets=valid_devices,
                                 parent=self)
        self.state["jobs"][job.id] = job_results["success"]
        if self.use_workflow_targets:
            successors = self.workflow_targets_processing(
                allowed_devices, job, job_results)
        else:
            # Follow success edges after a success, failure edges otherwise.
            successors = job.adjacent_jobs(
                self,
                "destination",
                "success" if job_results["success"] else "failure",
            )
        if job.type == "Workflow":
            # Sub-workflow payloads are dropped to keep results compact.
            job_results.pop("results")
        results["results"][job.name] = job_results
        for successor in successors:
            if successor not in visited:
                jobs.append(successor)
            # NOTE(review): self.jobs[1] is presumably the "End" job —
            # confirm the jobs list ordering guarantees this.
            if not self.use_workflow_targets and successor == self.jobs[1]:
                results["success"] = True
        sleep(job.waiting_time)
    if self.use_workflow_targets:
        start_devices = allowed_devices[start_job.name]
        end_devices = allowed_devices["End"]
        results["devices"] = {
            device.name: {
                "success": device in end_devices
            }
            for device in start_devices
        }
        results["success"] = start_devices == end_devices
    return results
def get_results(self, payload, device=None):
    """Run this service (optionally for one device) and record the outcome.

    Handles skip logic, restart-run payload restoration, iteration over
    ``iteration_values``, exception capture, duration/progress bookkeeping,
    result persistence and the post-run waiting time.
    """
    self.log("info", "STARTING", device)
    start = datetime.now().replace(microsecond=0)
    skip_service = False
    if self.skip_query:
        skip_service = self.eval(self.skip_query, **locals())[0]
    if skip_service or self.skip:
        if device:
            self.run_state["progress"]["device"]["skipped"] += 1
            # A skipped service counts as success/failure per skip_value.
            key = "success" if self.skip_value == "True" else "failure"
            self.run_state["summary"][key].append(device.name)
        return {
            "result": "skipped",
            "success": self.skip_value == "True",
        }
    results = {"runtime": app.get_time(), "logs": []}
    try:
        if self.restart_run and self.service.type == "workflow":
            old_result = self.restart_run.result(
                device=device.name if device else None)
            # NOTE(review): membership is tested on old_result.result but
            # the value is read via old_result["payload"] — confirm both
            # accessors refer to the same mapping.
            if old_result and "payload" in old_result.result:
                payload.update(old_result["payload"])
        if self.service.iteration_values:
            # Run once per iteration value, exposing the current value to
            # the service through the payload helper.
            targets_results = {}
            for target in self.eval(self.service.iteration_values,
                                    **locals())[0]:
                self.payload_helper(payload, self.iteration_variable_name,
                                    target)
                targets_results[str(target)] = self.run_service_job(
                    payload, device)
            results.update({
                "result": targets_results,
                "success": all(r["success"]
                               for r in targets_results.values()),
            })
        else:
            results.update(self.run_service_job(payload, device))
    except Exception:
        # chr(10) is "\n": re-join the traceback into one string.
        results.update({
            "success": False,
            "result": chr(10).join(format_exc().splitlines())
        })
        self.log("error", chr(10).join(format_exc().splitlines()), device)
    results["duration"] = str(datetime.now().replace(microsecond=0) - start)
    if device:
        status = "success" if results["success"] else "failure"
        self.run_state["progress"]["device"][status] += 1
        self.run_state["summary"][status].append(device.name)
        self.create_result(results, device)
    Session.commit()
    self.log("info", "FINISHED", device)
    if self.waiting_time:
        self.log("info", f"SLEEP {self.waiting_time} seconds...", device)
        sleep(self.waiting_time)
    return results
def update(self, cls: str, **kwargs: Any) -> dict:
    """Create or update an instance of *cls* from *kwargs*.

    Returns the serialized instance on success, or an {"error": ...} dict
    when the JSON payload is malformed or the name is already taken.
    """
    try:
        new_instance = factory(cls, **kwargs)
        Session.commit()
        return new_instance.serialized
    except IntegrityError:
        return {"error": "An object with the same name already exists"}
    except JSONDecodeError:
        return {"error": "Invalid JSON syntax (JSON field)"}
def start(name, devices, payload):
    """CLI entry point: run the service *name*.

    *devices* is an optional comma-separated list of device names and
    *payload* an optional JSON string; the run results are echoed.
    """
    device_names = devices.split(",") if devices else []
    device_ids = [
        fetch("device", name=device_name).id for device_name in device_names
    ]
    run_payload = loads(payload) if payload else {}
    run_payload["devices"] = device_ids
    service = fetch("service", name=name)
    results = app.run(service.id, **run_payload)
    Session.commit()
    echo(app.str_dict(results))
def init_run(self, parent: Optional["Job"]) -> None:
    """Flag this job as running and initialize its run state and logs.

    Nested runs append to the parent's log; a top-level run resets its own
    log and commits the session.
    """
    current_job = parent or self
    self.is_running, self.state = True, {}
    starting_log = f"{self.type} {self.name}: Starting."
    if parent:
        current_job.logs.append(starting_log)
    else:
        current_job.logs = [starting_log]
        Session.commit()
def poller_service(self, run: "Run", payload: dict) -> dict:
    """Trigger every configuration-backup service, then refresh pools.

    Pools defined on the current configuration are recomputed so they pick
    up the freshly fetched configurations.
    """
    for service in fetch_all("Service"):
        if getattr(service, "configuration_backup_service", False):
            run_job(service.id)
    Session.commit()
    for pool in fetch_all("Pool"):
        if pool.device_current_configuration:
            pool.compute_pool()
    return {"success": True}
def start(name: str, devices: str, payload: str) -> None:
    """CLI entry point: run the job *name*.

    *devices* is an optional comma-separated list of device names and
    *payload* an optional JSON string; the run results are echoed.
    """
    device_names = devices.split(",") if devices else []
    device_ids = [
        fetch("Device", name=device_name).id for device_name in device_names
    ]
    run_payload = loads(payload) if payload else {}
    run_payload["devices"] = device_ids
    job = fetch("Job", name=name)
    results = run_job(job.id, **run_payload)
    Session.commit()
    echo(controller.str_dict(results))
def post(self, cls):
    """REST endpoint: create/update an instance of *cls* from request JSON."""
    try:
        payload = request.get_json(force=True)
        serialized = factory(cls, **app.objectify(cls, payload)).serialized
        Session.commit()
        return serialized
    except Exception as exc:
        return abort(500, message=f"Update failed ({exc})")
def import_topology(self, **kwargs: Any) -> str:
    """Import an Excel topology file, optionally wiping devices first.

    kwargs["file"] is the uploaded workbook; kwargs["replace"] deletes all
    existing devices beforehand.
    """
    uploaded = kwargs["file"]
    if kwargs["replace"]:
        delete_all("Device")
        Session.commit()
    # NOTE(review): for a disallowed extension this implicitly returns None
    # despite the `-> str` annotation — confirm callers handle that.
    if self.allowed_file(secure_filename(uploaded.filename), {"xls", "xlsx"}):
        status = self.topology_import(uploaded)
        info("Inventory import: Done.")
        return status
def post(self, cls: str) -> dict:
    """REST endpoint: create/update an instance of *cls* from request JSON."""
    try:
        payload = request.get_json(force=True)
        serialized = factory(
            cls, **controller.objectify(cls.capitalize(), payload)
        ).serialized
        Session.commit()
        return serialized
    except Exception as exc:
        return abort(500, message=f"Update failed ({exc})")
def init_database(self) -> None:
    """Bootstrap the database: parameters, server id, admin user, then
    either the example dataset or the default dataset, plus git content."""
    self.init_parameters()
    self.configure_server_id()
    self.create_admin_user()
    Session.commit()
    dataset = "examples" if self.create_examples else "default"
    self.migration_import(name=dataset, import_export_types=import_classes)
    if self.create_examples:
        self.update_credentials()
    self.get_git_content()
    Session.commit()
def handle(self) -> None:
    """Process one incoming syslog datagram.

    Resolves the sender to a known device when possible, runs every Event's
    log matcher against the message, and persists a Syslog record.
    """
    source_address = self.client_address[0]
    device = fetch("Device", allow_none=True, ip_address=source_address)
    properties = {
        "source": device.name if device else source_address,
        "content": str(bytes.decode(self.request[0].strip())),
    }
    for event in fetch_all("Event"):
        event.match_log(**properties)
    Session.add(factory("Syslog", **properties))
    Session.commit()
def build_results(
    self,
    payload: Optional[dict] = None,
    targets: Optional[Set["Device"]] = None,
    parent: Optional["Job"] = None,
    *other: Any,
) -> dict:
    """Run this job with retries and aggregate per-device results.

    With device targets, each retry only re-runs the devices that have not
    yet succeeded; without targets, the job is retried as a whole until it
    succeeds or retries are exhausted.  Intermediate attempts are recorded
    under "Attempt <n>" keys when retries are configured.
    """
    current_job = parent or self
    results: dict = {"results": {}, "success": False}
    if self.has_targets and not targets:
        targets = self.compute_devices(payload)
    if targets:
        results["results"]["devices"] = {}
    for i in range(self.number_of_retries + 1):
        current_job.logs.append(
            f"Running {self.type} {self.name} (attempt n°{i + 1})")
        self.completed = self.failed = 0
        if not parent:
            Session.commit()
        attempt, logs = self.device_run(payload or {}, targets, parent)
        Session.commit()
        current_job.logs.extend(logs)
        if targets:
            # Keep the successes, retry only the remaining devices.
            for device in set(targets):
                if not attempt["devices"][device.name]["success"]:
                    continue
                results["results"]["devices"][
                    device.name] = attempt["devices"][device.name]
                targets.remove(device)
            if not targets:
                results["success"] = True
                break
            else:
                if self.number_of_retries:
                    results[f"Attempt {i + 1}"] = attempt
                if i != self.number_of_retries:
                    sleep(self.time_between_retries)
                else:
                    # Last attempt: record the remaining failures too.
                    for device in targets:
                        results["results"]["devices"][
                            device.name] = attempt["devices"][device.name]
        else:
            if self.number_of_retries:
                # Fix: this branch labeled attempts "Attempts {n}" while the
                # targeted branch used "Attempt {n}" — use one key format.
                results[f"Attempt {i + 1}"] = attempt
            if attempt["success"] or i == self.number_of_retries:
                results["results"] = attempt
                results["success"] = attempt["success"]
                break
            else:
                sleep(self.time_between_retries)
    return results
def result_log_deletion(self, **kwargs):
    """Bulk-delete results and/or changelog entries older than a cutoff.

    kwargs["date_time"] ("%d/%m/%Y %H:%M:%S") is the cutoff; for each model
    name in kwargs["deletion_types"], entries strictly older than the
    cutoff are deleted.
    """
    cutoff = datetime.strptime(kwargs["date_time"], "%d/%m/%Y %H:%M:%S")
    cutoff_string = cutoff.strftime("%Y-%m-%d %H:%M:%S.%f")
    # Fix: an unrecognized model previously left field_name unbound and
    # raised NameError — map each supported model to its timestamp column
    # and skip anything else.
    timestamp_fields = {"result": "runtime", "changelog": "time"}
    for model in kwargs["deletion_types"]:
        field_name = timestamp_fields.get(model)
        if field_name is None:
            continue
        session_query = Session.query(models[model]).filter(
            getattr(models[model], field_name) < cutoff_string)
        # synchronize_session=False: bulk delete without reconciling
        # in-memory objects (none are held here).
        session_query.delete(synchronize_session=False)
    Session.commit()
def start(name: str, devices: str, payload: str) -> None:
    """CLI entry point: run job *name* against an optional device set.

    *devices* is a comma-separated list of device names and *payload* an
    optional JSON string; the run results are echoed.
    """
    if devices:
        targets = {
            fetch("Device", name=device_name)
            for device_name in devices.split(",")
        }
    else:
        targets = set()
    if payload:
        payload = loads(payload)
    job = fetch("Job", name=name)
    results = job.run(targets=targets, payload=payload)[0]
    Session.commit()
    echo(controller.str_dict(results))
def copy_service_in_workflow(self, workflow_id, **kwargs):
    """Insert a single service into a workflow.

    kwargs["mode"] is "deep" (duplicate the service into the workflow) or
    anything else for a shallow insert, which requires a shared service not
    already present.  Returns the serialized service and the workflow's new
    modification time, or an {"error": ...} dict.
    """
    service = fetch("service", id=kwargs["services"])
    workflow = fetch("workflow", id=workflow_id)
    if kwargs["mode"] == "deep":
        service = service.duplicate(workflow)
    else:
        if not service.shared:
            return {"error": "This is not a shared service."}
        if service in workflow.services:
            return {"error": f"This workflow already contains {service.name}."}
        workflow.services.append(service)
    workflow.last_modified = self.get_time()
    Session.commit()
    return {"service": service.serialized, "update_time": workflow.last_modified}
def compute_devices(self, payload: Optional[dict] = None) -> Set["Device"]:
    """Resolve this job's target devices.

    Either evaluates the job's YAQL query against *payload* and fetches one
    Device per resulting value, or takes the union of the job's explicit
    devices and its pools' devices.  Updates ``number_of_targets`` and
    commits before returning the set.
    """
    if self.define_devices_from_payload:
        # NOTE(review): here `factory` is used as the yaql module
        # (factory.YaqlFactory), while elsewhere `factory` is the
        # instance-creation function — confirm which name this module
        # actually imports.
        engine = factory.YaqlFactory().create()
        devices = {
            fetch("Device", **{self.query_property_type: value})
            for value in engine(self.yaql_query).evaluate(data=payload)
        }
    else:
        devices = set(self.devices)
        for pool in self.pools:
            devices |= set(pool.devices)
    self.number_of_targets = len(devices)
    Session.commit()
    return devices