def authenticate_user(self, **kwargs: str) -> Base:
    name, password = kwargs["name"], kwargs["password"]
    # Default to None so the final "return user" cannot raise an
    # UnboundLocalError when LDAP or TACACS authentication fails.
    user = None
    if kwargs["authentication_method"] == "Local User":
        user = fetch("User", allow_none=True, name=name)
        return user if user and password == user.password else False
    elif kwargs["authentication_method"] == "LDAP Domain":
        with Connection(
            self.ldap_client,
            user=f"{self.ldap_userdn}\\{name}",
            password=password,
            auto_bind=True,
            authentication=NTLM,
        ) as connection:
            connection.search(
                self.ldap_basedn,
                f"(&(objectClass=person)(samaccountname={name}))",
                search_scope=SUBTREE,
                get_operational_attributes=True,
                attributes=["cn", "memberOf", "mail"],
            )
            json_response = loads(connection.response_to_json())["entries"][0]
            # Only create the user if one of their LDAP groups matches an
            # admin group from the settings.
            if json_response and any(
                group in s
                for group in self.ldap_admin_group.split(",")
                for s in json_response["attributes"]["memberOf"]
            ):
                user = factory(
                    "User",
                    **{
                        "name": name,
                        "password": password,
                        "email": json_response["attributes"].get("mail", ""),
                    },
                )
    elif kwargs["authentication_method"] == "TACACS":
        if self.tacacs_client.authenticate(name, password).valid:
            user = factory("User", **{"name": name, "password": password})
    Session.commit()
    return user
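# Illustrative usage (a sketch, not part of the original module). `app` is a
# hypothetical controller instance and the credentials are placeholders:
#
#     user = app.authenticate_user(
#         name="admin",
#         password="admin",
#         authentication_method="Local User",
#     )
#     if user:
#         login_user(user, remember=False)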
def configure_database(self):
    self.init_services()
    Base.metadata.create_all(bind=engine)
    configure_mappers()
    configure_events(self)
    self.init_forms()
    self.clean_database()
    if not fetch("user", allow_none=True, name="admin"):
        self.configure_server_id()
        self.create_admin_user()
        Session.commit()
        if self.settings["app"]["create_examples"]:
            self.migration_import(
                name="examples", import_export_types=import_classes
            )
            self.update_credentials()
        else:
            self.migration_import(
                name="default", import_export_types=import_classes
            )
        self.get_git_content()
        Session.commit()
def run_job(self, id: Optional[int] = None, **kwargs: Any) -> dict:
    for property in ("user", "csrf_token", "form_type"):
        kwargs.pop(property, None)
    kwargs["creator"] = getattr(current_user, "name", "admin")
    job = fetch("Job", id=id)
    if job.type == "Workflow":
        if not kwargs.get("payload"):
            kwargs["payload"] = {}
        kwargs["payload"].update(self.add_restart_payload(job, **kwargs))
    kwargs["runtime"] = runtime = self.get_time()
    if kwargs.get("asynchronous", True):
        self.scheduler.add_job(
            id=self.get_time(),
            func=run_job,
            run_date=datetime.now(),
            args=[id],
            kwargs=kwargs,
            trigger="date",
        )
    else:
        job.run(runtime=runtime)
    return {**job.serialized, "runtime": runtime}
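# Illustrative usage (a sketch; the job id is hypothetical). With the default
# asynchronous=True the run is handed to the scheduler and the call returns
# immediately; asynchronous=False runs the job inline and blocks:
#
#     summary = app.run_job(id=42, asynchronous=False)
#     print(summary["runtime"])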
def get_job_list(self, id: int, **kw: Any) -> list:
    comp = "_compare" if kw["compare"] else ""
    defaults = [
        ("all", "All jobs"),
        ("all failed", "All jobs that failed"),
        ("all passed", "All jobs that passed"),
    ]
    return defaults + list(
        dict.fromkeys(
            (run.job_id, run.job.name)
            for run in sorted(
                fetch(
                    "Run",
                    parent_runtime=kw.get(f"runtime{comp}"),
                    allow_none=True,
                    all_matches=True,
                ),
                key=attrgetter("runtime"),
            )
            if run.job_id
        )
    )
def export_job(self, job_id: str) -> None:
    job = fetch("Job", id=job_id)
    if job.type == "Workflow":
        path = self.path / "projects" / "exported_jobs" / "workflows" / job.filename
        path.mkdir(parents=True, exist_ok=True)
        for instance_type in ("jobs", "workflow", "edges"):
            Path(path / instance_type).mkdir(parents=True, exist_ok=True)
        for sub_job in job.jobs:
            with open(path / "jobs" / f"{sub_job.filename}.yaml", "w") as file:
                sub_job_as_dict = sub_job.to_dict(export=True)
                for relation in ("devices", "pools", "events"):
                    sub_job_as_dict.pop(relation)
                if sub_job.type == "Workflow":
                    sub_job_as_dict["type"] = "Workflow"
                yaml.dump(sub_job_as_dict, file)
        for edge in job.edges:
            name = self.strip_all(f"{edge.workflow}{edge.source}{edge.destination}")
            with open(path / "edges" / f"{name}.yaml", "w") as file:
                edge_as_dict = {**edge.to_dict(export=True), "type": "WorkflowEdge"}
                yaml.dump(edge_as_dict, file)
        with open(path / "workflow" / f"{job.filename}.yaml", "w") as file:
            job_as_dict = job.to_dict(export=True)
            for relation in ("devices", "pools", "events"):
                job_as_dict.pop(relation)
            yaml.dump({**job_as_dict, "type": "Workflow"}, file)
        with open_tar(f"{path}.tgz", "w:gz") as tar:
            tar.add(path, arcname=job.filename)
        rmtree(path)
    else:
        path = self.path / "projects" / "exported_jobs" / "services"
        with open(path / f"{job.filename}.yaml", "w") as file:
            job_as_dict = job.to_dict(export=True)
            for relation in ("devices", "pools", "events"):
                job_as_dict.pop(relation)
            yaml.dump(job_as_dict, file)
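# Illustrative usage (a sketch; the job id is hypothetical). A workflow is
# bundled as a .tgz archive under projects/exported_jobs/workflows, while a
# plain service is exported as a single YAML file:
#
#     app.export_job("15")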
def compute_devices(self, payload: dict) -> Set["Device"]:
    if self.job.python_query:
        values = self.eval(self.job.python_query, **locals())
        devices, not_found = [], []
        if isinstance(values, str):
            values = [values]
        for value in values:
            device = fetch(
                "Device", allow_none=True, **{self.job.query_property_type: value}
            )
            if device:
                devices.append(device)
            else:
                not_found.append(value)
        if not_found:
            raise Exception(f"Python query invalid targets: {', '.join(not_found)}")
    else:
        devices = set(self.devices)  # type: ignore
        for pool in self.pools:
            devices |= set(pool.devices)  # type: ignore
    self.set_state(number_of_targets=len(devices))
    return devices  # type: ignore
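# Illustrative python_query values (assumptions, for a job whose
# query_property_type is "name"). The query is evaluated via self.eval and
# may return a single string or an iterable of strings, e.g.:
#
#     "Washington"
#     ["Washington", "Denver"]
#     [device.name for device in pool.devices]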
def get_result(self, job: str, device: Optional[str] = None) -> Optional[dict]:
    job_id = fetch("Job", name=job).id

    def recursive_search(run: "Run") -> Optional[dict]:
        if not run:
            return None
        runs = fetch(
            "Run",
            allow_none=True,
            all_matches=True,
            parent_runtime=run.parent_runtime,
            job_id=job_id,
        )
        results: list = list(filter(None, [run.result(device) for run in runs]))
        if not results:
            return recursive_search(run.restart_run)
        else:
            return results.pop().result

    return recursive_search(self)
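# Illustrative usage (a sketch; the job and device names are hypothetical).
# Fetches the result of another job in the same parent run, falling back
# through restart runs when that job did not execute in the current one:
#
#     previous = run.get_result("configuration_backup", device="Washington")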
def connection(self, device_id, **kwargs):
    device = fetch("device", id=device_id)
    # GoTTY exposes the terminal session in the browser on a dedicated port.
    cmd = [str(self.path / "files" / "apps" / "gotty"), "-w"]
    port, protocol = self.get_ssh_port(), kwargs["protocol"]
    address = getattr(device, kwargs["address"])
    cmd.extend(["-p", str(port)])
    if "accept-once" in kwargs:
        cmd.append("--once")
    if "multiplexing" in kwargs:
        cmd.extend(f"tmux new -A -s gotty{port}".split())
    if self.settings["ssh"]["bypass_key_prompt"]:
        options = "-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
    else:
        options = ""
    if protocol == "telnet":
        cmd.extend(f"telnet {address}".split())
    elif "authentication" in kwargs:
        # Credentials come from the device, the logged-in user, or the form.
        login, pwd = (
            (device.username, device.password)
            if kwargs["credentials"] == "device"
            else (current_user.name, current_user.password)
            if kwargs["credentials"] == "user"
            else (kwargs["username"], kwargs["password"])
        )
        cmd.extend(f"sshpass -p {pwd} ssh {options} {login}@{address}".split())
    else:
        cmd.extend(f"ssh {options} {address}".split())
    if protocol != "telnet":
        cmd.extend(f"-p {device.port}".split())
    Popen(cmd)
    return {
        "device": device.name,
        "port": port,
        "redirection": self.settings["ssh"]["port_redirection"],
        "server_addr": self.settings["app"]["address"],
    }
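# Illustrative call (a sketch; keyword values are placeholders). The "address"
# keyword names a device attribute to connect to, and the mere presence of
# "authentication" enables credential-based login:
#
#     app.connection(
#         device_id=1,
#         protocol="ssh",
#         address="ip_address",
#         authentication=True,
#         credentials="device",
#     )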
def export_job(self, job_id: str) -> None:
    job = fetch("Job", id=job_id)
    if job.type == "Workflow":
        path = self.path / "projects" / "exported_jobs" / "workflows" / job.name
        path.mkdir(parents=True, exist_ok=True)
        for instance_type in ("jobs", "workflow", "edges"):
            Path(path / instance_type).mkdir(parents=True, exist_ok=True)
        for sub_job in job.jobs:
            with open(path / "jobs" / f"{sub_job.name}.yaml", "w") as file:
                sub_job_as_dict = sub_job.to_dict(export=True)
                if sub_job.type == "Workflow":
                    sub_job_as_dict["type"] = "Workflow"
                dump(sub_job_as_dict, file)
        for edge in job.edges:
            name = f"{edge.workflow}{edge.source}{edge.destination}"
            with open(path / "edges" / f"{name}.yaml", "w") as file:
                edge_as_dict = {**edge.to_dict(export=True), "type": "WorkflowEdge"}
                dump(edge_as_dict, file)
        with open(path / "workflow" / f"{job.name}.yaml", "w") as file:
            dump({**job.to_dict(export=True), "type": "Workflow"}, file)
    else:
        path = self.path / "projects" / "exported_jobs" / "services"
        with open(path / f"{job.name}.yaml", "w") as file:
            dump(job.to_dict(export=True), file)
def get_device_list(self, id: int, **kw: Any) -> list:
    comp = "_compare" if kw["compare"] else ""
    defaults = [
        ("global", "Entire job payload"),
        ("all", "All devices"),
        ("all failed", "All devices that failed"),
        ("all passed", "All devices that passed"),
    ]
    if "runtime" not in kw:
        request: Any = {"id": id}
    else:
        runtime_key = "parent_runtime" if "job" in kw else "runtime"
        request = {runtime_key: kw.get(f"runtime{comp}", id)}
        if kw.get(f"job{comp}") not in ("global", "all"):
            request["job_id"] = kw.get(f"job{comp}", id)
        if kw.get(f"workflow_device{comp}"):
            request["workflow_device_id"] = kw.get(f"workflow_device{comp}")
    runs = fetch("Run", allow_none=True, **request)
    if not runs:
        return defaults
    return defaults + list(
        set(
            (result.device_id, result.device_name)
            for result in runs.results
            if result.device_id
        )
    )
def run_service(self, path, **kwargs):
    path_ids = str(path).split(">")
    if kwargs.get("restart_from_top_level_workflow", False):
        kwargs["restart_path"] = f"{path}>{'-'.join(kwargs['start_services'])}"
        service_id = path_ids[0]
    else:
        service_id = path_ids[-1]
    for property in ("user", "csrf_token", "form_type"):
        kwargs.pop(property, None)
    kwargs["creator"] = getattr(current_user, "name", "admin")
    service = fetch("service", id=service_id)
    kwargs["runtime"] = runtime = self.get_time()
    if kwargs.get("asynchronous", True):
        self.scheduler.add_job(
            id=self.get_time(),
            func=self.run,
            run_date=datetime.now(),
            args=[service_id],
            kwargs=kwargs,
            trigger="date",
        )
    else:
        service.run(runtime=runtime)
    return {"service": service.serialized, "runtime": runtime}
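# Illustrative usage (a sketch; the ids are hypothetical). The path is a
# ">"-separated chain of service ids; the last id is the service to run,
# unless the run restarts from the top-level workflow, in which case the
# first id (the top-level workflow) is used:
#
#     app.run_service("67>70>72", asynchronous=False)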
def get_password(username):
    return getattr(fetch("user", name=username), "password", False)
def get(self, instance_type, id):
    return fetch(instance_type, id=id).serialized
def user_loader(id):
    return fetch("user", allow_none=True, id=id)
def request_loader(request):
    return fetch("user", allow_none=True, name=request.form.get("name"))
def get(self, cls: str, name: str) -> dict:
    try:
        return fetch(cls, name=name).serialized
    except Exception:
        return abort(404, message=f"{cls} {name} not found.")
def get(self, name: str) -> str:
    device = fetch("Device", name=name)
    return device.configurations[max(device.configurations)]
def tracking_bfs(self, run, payload):
    number_of_runs = defaultdict(int)
    start = fetch("service", scoped_name="Start")
    end = fetch("service", scoped_name="End")
    services = [fetch("service", id=id) for id in run.start_services]
    visited, success, targets = set(), False, defaultdict(set)
    restart_run = run.restart_run
    # Every start service initially targets all devices of the run.
    for service in services:
        targets[service.name] |= {device.name for device in run.devices}
    while services:
        if run.stop:
            return {"payload": payload, "success": False}
        service = services.pop()
        # Skip a service that exceeded its maximum runs or whose
        # prerequisites have not all been visited yet.
        if number_of_runs[service.name] >= service.maximum_runs or any(
            node not in visited
            for node, _ in service.adjacent_services(self, "source", "prerequisite")
        ):
            continue
        number_of_runs[service.name] += 1
        visited.add(service)
        if service in (start, end):
            results = {
                "summary": {
                    "success": {device.name for device in run.devices},
                    "failure": [],
                },
                "success": True,
            }
        else:
            kwargs = {
                "devices": [
                    fetch("device", name=name).id for name in targets[service.name]
                ],
                "service": service.id,
                "workflow": self.id,
                "restart_run": restart_run,
                "parent": run,
                "parent_runtime": run.parent_runtime,
            }
            if run.parent_device_id:
                kwargs["parent_device"] = run.parent_device_id
            service_run = factory("run", **kwargs)
            results = service_run.run(payload)
        if service.run_method in ("once", "per_service_with_service_targets"):
            # All targets follow the same edge, based on the overall result.
            edge_type = "success" if results["success"] else "failure"
            for successor, edge in service.adjacent_services(
                self, "destination", edge_type
            ):
                targets[successor.name] |= targets[service.name]
                services.append(successor)
                run.edge_state[edge.id] += len(targets[service.name])
        else:
            # Devices are routed individually, based on the per-device summary.
            summary = results.get("summary", {})
            for edge_type in ("success", "failure"):
                for successor, edge in service.adjacent_services(
                    self, "destination", edge_type
                ):
                    if not summary[edge_type]:
                        continue
                    targets[successor.name] |= set(summary[edge_type])
                    services.append(successor)
                    run.edge_state[edge.id] += len(summary[edge_type])
    success_devices = targets[end.name]
    failure_devices = targets[start.name] - success_devices
    success = not failure_devices
    summary = {
        "success": list(success_devices),
        "failure": list(failure_devices),
    }
    run.run_state["progress"]["device"]["success"] = len(success_devices)
    run.run_state["progress"]["device"]["failure"] = len(failure_devices)
    run.run_state["summary"] = summary
    Session.refresh(run)
    run.restart_run = restart_run
    return {"payload": payload, "success": success}
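# Illustrative call (a sketch; the payload content is hypothetical). The
# traversal starts from the run's start services and is successful only if
# every device that entered at "Start" reaches "End":
#
#     results = workflow.tracking_bfs(run, {"variables": {}})
#     results["success"]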
def delete_edge(self, workflow_id: int, edge_id: int) -> str:
    delete("WorkflowEdge", id=edge_id)
    now = self.get_time()
    fetch("Workflow", id=workflow_id).last_modified = now
    return now
def clear_results(self, job_id: int) -> None:
    for run in fetch("Run", all_matches=True, allow_none=True, job_id=job_id):
        Session.delete(run)
def skip_jobs(self, skip: str, job_ids: str) -> None:
    for job_id in job_ids.split("-"):
        fetch("Job", id=job_id).skip = skip == "skip"
def get_run_results(self, id: int, device: Any, **kw: Any) -> Optional[dict]:
    run = fetch("Run", allow_none=True, id=id)
    return self.get_service_results(run.job.id, run.runtime, device, None, None)
def get_device_results(self, id: int, runtime: str, **_: Any) -> Optional[dict]:
    run = fetch("Run", allow_none=True, runtime=runtime)
    return next(r.result for r in run.results if r.device_id == int(id))
def get_properties(self, instance_type, id):
    return fetch(instance_type, id=id).get_properties()
def task_action(self, action: str, task_id: int) -> Optional[dict]:
    try:
        return getattr(fetch("Task", id=task_id), action)()
    except JobLookupError:
        return {"error": "This task no longer exists."}
def switch_menu(self, user_id):
    user = fetch("user", id=user_id)
    user.small_menu = not user.small_menu
def create_label(self, workflow_id: int, x: int, y: int, **kwargs: Any) -> dict:
    workflow, label_id = fetch("Workflow", id=workflow_id), str(uuid4())
    label = {"positions": [x, y], "content": kwargs["content"]}
    workflow.labels[label_id] = label
    return {"id": label_id, **label}
def get(self, name: str, runtime: str) -> str:
    job = fetch("Job", name=name)
    return fetch("Result", job_id=job.id, runtime=runtime).result
def delete_label(self, workflow_id: int, label: str) -> str:
    # Labels are keyed by the str(uuid4()) generated in create_label,
    # so the key is a string, not an integer.
    workflow = fetch("Workflow", id=workflow_id)
    workflow.labels.pop(label)
    now = self.get_time()
    workflow.last_modified = now
    return now
def get(self, cls: str) -> Union[dict, list]:
    try:
        results = fetch(cls, all_matches=True, **request.args.to_dict())
        return [result.get_properties() for result in results]
    except Exception:
        return abort(404, message=f"There are no such {cls}s.")