def job(self, run: "Run", payload: dict, device: Device) -> dict:
    """Export the app's data, archive it, and push the archive to a device over SSH.

    :param run: the Run driving this job (supplies destination_path, transfer_file
        and the delete_folder / delete_archive flags).
    :param payload: standard job payload (unused here; kept for the job signature).
    :param device: target Device supplying ip_address / username / password.
    :returns: dict with ``success`` and a human-readable ``result`` message.
    """
    now = app.strip_all(app.get_time())
    # All migration artifacts live under projects/migrations/backup_<timestamp>.
    migration_dir = Path.cwd() / "projects" / "migrations"
    folder = migration_dir / f"backup_{now}"
    source = migration_dir / f"backup_{now}.tgz"
    app.migrate_export(
        Path.cwd(),
        {"import_export_types": import_classes, "name": f"backup_{now}"},
    )
    with open_tar(source, "w:gz") as tar:
        tar.add(folder, arcname="/")
    ssh_client = SSHClient()
    ssh_client.set_missing_host_key_policy(AutoAddPolicy())
    ssh_client.connect(
        device.ip_address,
        username=device.username,
        password=device.password,
        look_for_keys=False,
    )
    try:
        destination = f"{run.sub(run.destination_path, locals())}/backup_{now}.tgz"
        run.transfer_file(ssh_client, [(source, destination)])
    finally:
        # Fix: always release the SSH connection, even if the transfer raises
        # (previously a failed transfer leaked the connection).
        ssh_client.close()
    if run.delete_folder:
        rmtree(folder)
    if run.delete_archive:
        remove(source)
    return {
        "success": True,
        "result": f"backup stored in {destination} ({device.ip_address})",
    }
def __init__(self, **kwargs):
    """Initialize a run: derive its runtime and its hierarchical service path."""
    supplied_runtime = kwargs.get("runtime")
    self.runtime = supplied_runtime if supplied_runtime else app.get_time()
    super().__init__(**kwargs)
    if kwargs.get("parent_runtime"):
        # Nested run: extend the parent's path with this service's id.
        self.path = f"{self.parent.path}>{self.service.id}"
    else:
        # Top-level run: it is its own parent and the path root.
        self.parent_runtime = self.runtime
        self.path = str(self.service.id)
def get_results(self, payload, device=None):
    """Run this service (optionally against one device) and record the result.

    Handles skip logic, restarted-workflow payload restoration, and optional
    iteration over computed targets. Updates the shared run state and commits
    the result to the database.

    :param payload: mutable payload dict shared across the run.
    :param device: optional Device target; None for device-less services.
    :returns: result dict (or a minimal "skipped" dict when skipped).
    """
    self.log("info", "STARTING", device)
    start = datetime.now().replace(microsecond=0)
    skip_service = False
    if self.skip_query:
        # eval returns a tuple; index 0 is the evaluated value.
        skip_service = self.eval(self.skip_query, **locals())[0]
    if skip_service or self.skip:
        if device:
            self.run_state["progress"]["device"]["skipped"] += 1
            # skip_value (a string) decides whether a skip counts as success.
            key = "success" if self.skip_value == "True" else "failure"
            self.run_state["summary"][key].append(device.name)
        return {
            "result": "skipped",
            "success": self.skip_value == "True",
        }
    results = {"runtime": app.get_time(), "logs": []}
    try:
        if self.restart_run and self.service.type == "workflow":
            # Restarted workflow: reload the payload saved by the previous run.
            old_result = self.restart_run.result(
                device=device.name if device else None)
            # NOTE(review): membership is checked on old_result.result but the
            # value is read via old_result["payload"] — presumably the result
            # object proxies __getitem__ to .result; confirm against the model.
            if old_result and "payload" in old_result.result:
                payload.update(old_result["payload"])
        if self.service.iteration_values:
            # One sub-run per iteration target; overall success requires all to pass.
            targets_results = {}
            for target in self.eval(self.service.iteration_values, **locals())[0]:
                self.payload_helper(payload, self.iteration_variable_name, target)
                targets_results[str(target)] = self.run_service_job(
                    payload, device)
            results.update({
                "result": targets_results,
                "success": all(r["success"] for r in targets_results.values()),
            })
        else:
            results.update(self.run_service_job(payload, device))
    except Exception:
        # Any failure is captured as a failed result with the full traceback.
        results.update({
            "success": False,
            "result": chr(10).join(format_exc().splitlines())
        })
        self.log("error", chr(10).join(format_exc().splitlines()), device)
    results["duration"] = str(datetime.now().replace(microsecond=0) - start)
    if device:
        # Per-device progress bookkeeping on the shared run state.
        status = "success" if results["success"] else "failure"
        self.run_state["progress"]["device"][status] += 1
        self.run_state["summary"][status].append(device.name)
        self.create_result(results, device)
    Session.commit()
    self.log("info", "FINISHED", device)
    if self.waiting_time:
        # Optional post-run delay configured on the service.
        self.log("info", f"SLEEP {self.waiting_time} seconds...", device)
        sleep(self.waiting_time)
    return results
def get_results(self, payload: dict, device: Optional["Device"] = None) -> dict:
    """Run the job (optionally per-device), with optional iteration, and return results.

    :param payload: mutable payload dict shared across the run.
    :param device: optional Device target; when set, it is passed to the job.
    :returns: result dict including ``runtime``, ``endtime`` and ``success``.
    """
    self.log(
        "info",
        f"Running {self.job.type}{f' on {device.name}' if device else ''}")
    results: Dict[Any, Any] = {"runtime": app.get_time()}
    try:
        # The job callable receives the device only when one is targeted.
        args = (device, ) if device else ()
        if self.job.iteration_values:
            # One job invocation per computed target; all must succeed.
            targets_results = {}
            for target in self.eval(self.job.iteration_values, **locals()):
                self.payload_helper(payload, self.iteration_variable_name,
                                    target)
                targets_results[target] = self.job.job(
                    self, payload, *args)
            results.update({
                "results": targets_results,
                "success": all(r["success"] for r in targets_results.values()),
            })
        else:
            results.update(self.job.job(self, payload, *args))
    except Exception:
        # Capture the full traceback as a failed result.
        results.update({
            "success": False,
            "result": chr(10).join(format_exc().splitlines())
        })
    if self.result_postprocessing:
        # User-defined post-processing runs as exec with the local scope exposed.
        self.eval(self.result_postprocessing, function="exec", **locals())
    results["endtime"] = app.get_time()
    # NOTE(review): this log line assumes the job's result dict always has a
    # "success" key on the non-exception path — confirm the job contract.
    self.log(
        "info",
        f"Finished running {self.job.type} '{self.job.name}'"
        f"({'SUCCESS' if results['success'] else 'FAILURE'})"
        f"{f' on {device.name}' if device else ''}",
    )
    # Progress counters: success counts toward completed only (bool as int).
    completed, failed = self.get_state("completed"), self.get_state(
        "failed")
    self.set_state(failed=failed + 1 - results["success"],
                   completed=completed + 1)
    return results
def get_results(self, payload, device=None):
    """Run this service (optionally per-device), record the result, and return success.

    :param payload: mutable payload dict shared across the run.
    :param device: optional Device target; None for device-less services.
    :returns: the boolean ``success`` value of the result dict.
    """
    self.log("info", "STARTING", device)
    results = {"runtime": app.get_time(), "logs": []}
    try:
        if self.restart_run and self.service.type == "workflow":
            # Fix: device may be None for device-less runs; accessing
            # device.name unconditionally raised AttributeError. Guard the
            # access the same way the sibling implementation does.
            old_result = self.restart_run.result(
                device=device.name if device else None)
            if old_result and "payload" in old_result.result:
                payload.update(old_result["payload"])
        if self.service.iteration_values:
            # One sub-run per iteration target; overall success requires all to pass.
            targets_results = {}
            for target in self.eval(self.service.iteration_values, **locals()):
                self.payload_helper(payload, self.iteration_variable_name,
                                    target)
                targets_results[target] = self.run_service_job(
                    payload, device)
            results.update({
                "result": targets_results,
                "success": all(r["success"] for r in targets_results.values()),
            })
        else:
            results.update(self.run_service_job(payload, device))
    except Exception:
        # Capture the full traceback as a failed result and log it.
        results.update({
            "success": False,
            "result": chr(10).join(format_exc().splitlines())
        })
        self.log("error", chr(10).join(format_exc().splitlines()), device)
    results["endtime"] = app.get_time()
    if device:
        # Per-device progress bookkeeping on the shared run state.
        status = "passed" if results["success"] else "failed"
        self.run_state["progress"]["device"][status] += 1
        self.run_state["summary"][status].append(device.name)
        self.create_result(results, device)
    Session.commit()
    self.log("info", "FINISHED", device)
    return results["success"]
def post(self):
    """Scheduler endpoint: launch a task's service in a background thread."""
    task = db.fetch("task", rbac="schedule", id=request.get_json())
    run_kwargs = {
        "trigger": "Scheduler",
        "creator": task.last_scheduled_by,
        "runtime": app.get_time(),
        "task": task.id,
    }
    # The task's initial payload may override the base keys above.
    run_kwargs.update(task.initial_payload)
    if task.devices:
        run_kwargs["target_devices"] = [d.id for d in task.devices]
    if task.pools:
        run_kwargs["target_pools"] = [p.id for p in task.pools]
    Thread(target=app.run, args=(task.service.id,), kwargs=run_kwargs).start()
def post(self):
    """Scheduler endpoint: run the task's service asynchronously as the caller."""
    task = db.fetch("task", id=request.get_json())
    run_parameters = {
        "trigger": "Scheduler",
        "creator": request.authorization["username"],
        "runtime": app.get_time(),
        "task": task.id,
        **task.initial_payload,
    }
    # Attach target devices/pools by id when the task has any.
    for relation in ("devices", "pools"):
        related = getattr(task, relation)
        if related:
            run_parameters[relation] = [obj.id for obj in related]
    Thread(target=app.run, args=(task.service.id,), kwargs=run_parameters).start()
def run(self, payload: Optional[dict] = None) -> dict:
    """Execute the job end to end, always finalizing state and persisting results.

    :param payload: optional initial payload; defaults to the job's own.
    :returns: result dict with success, timing, state, logs and properties.
    """
    try:
        self.log("info", f"{self.job.type} {self.job.name}: Starting")
        self.set_state(status="Running", type=self.job.type)
        # Track concurrent runs of this job in the in-memory job registry.
        app.job_db[self.job.id]["runs"] += 1
        Session.commit()
        payload = payload or self.job.initial_payload
        if self.restart_run and self.job.type == "Workflow":
            # Restarted workflow: restore the variables saved by the prior run.
            global_result = self.restart_run.result()
            if global_result:
                payload["variables"] = global_result.result["results"].get(
                    "variables", {}
                )
        results = self.job.build_results(self, payload)
    except Exception:
        result = (
            f"Running {self.job.type} '{self.job.name}'"
            " raised the following exception:\n"
            f"{chr(10).join(format_exc().splitlines())}\n\n"
            "Run aborted..."
        )
        self.log("error", result)
        results = {"success": False, "results": result}
    finally:
        # Finalization always runs: release connections, set final status,
        # collect state/logs, and persist the result.
        self.close_connection_cache()
        if self.stop:
            status = "Aborted"
        else:
            status = f"Completed ({'success' if results['success'] else 'failure'})"
        self.status = status  # type: ignore
        self.set_state(status=status, success=results["success"])
        app.job_db[self.job.id]["runs"] -= 1
        results["endtime"] = self.endtime = app.get_time()  # type: ignore
        # Detach this run's state and logs from the global registries.
        results["state"] = app.run_db.pop(self.runtime)
        results["logs"] = app.run_logs.pop(self.runtime)  # type: ignore
        if self.task and not self.task.frequency:
            # One-shot task: deactivate it after this run.
            self.task.is_active = False
        results["properties"] = {
            "run": self.properties,
            "service": self.job.get_properties(exclude=["positions"]),
        }
        self.create_result(results)
        self.log("info", f"{self.job.type} {self.job.name}: Finished")
        Session.commit()
    # Only standalone (non-workflow) runs send their own notification.
    if not self.workflow and self.send_notification:
        self.notify(results)
    return results
def post(self):
    """REST endpoint: run a service by name.

    Resolves device names, device IP addresses and pool names to ids, then
    either schedules the run (async) or executes it inline and returns the
    outcome. Lookup failures are returned as an ``errors`` list.
    """
    data = {
        "trigger": "REST",
        "creator": request.authorization["username"],
        **request.get_json(force=True),
    }
    errors, devices, pools = [], [], []
    service = db.fetch("service", name=data["name"])
    handle_asynchronously = data.get("async", False)
    for device_name in data.get("devices", ""):
        device = db.fetch("device", name=device_name)
        if not device:
            errors.append(f"No device with the name '{device_name}'")
            continue
        devices.append(device.id)
    for device_ip in data.get("ip_addresses", ""):
        device = db.fetch("device", ip_address=device_ip)
        if not device:
            errors.append(f"No device with the IP address '{device_ip}'")
            continue
        devices.append(device.id)
    for pool_name in data.get("pools", ""):
        pool = db.fetch("pool", name=pool_name)
        if not pool:
            errors.append(f"No pool with the name '{pool_name}'")
            continue
        pools.append(pool.id)
    if errors:
        return {"errors": errors}
    if devices or pools:
        data.update({"devices": devices, "pools": pools})
    data["runtime"] = runtime = app.get_time()
    if not handle_asynchronously:
        return {**app.run(service.id, **data), "errors": errors}
    app.scheduler.add_job(
        id=runtime,
        func=app.run,
        run_date=datetime.now(),
        args=[service.id],
        kwargs=data,
        trigger="date",
    )
    return {"errors": errors, "runtime": runtime}
def run(self, payload=None):
    """Execute the service end to end, always finalizing state and persisting results.

    :param payload: optional initial payload; defaults to the service's own.
    :returns: result dict with success, timing, logs, state and properties.
    """
    self.init_state()
    self.run_state["status"] = "Running"
    if payload is None:
        payload = self.service.initial_payload
    try:
        # Track concurrent runs of this service in the in-memory registry.
        app.service_db[self.service.id]["runs"] += 1
        Session.commit()
        results = self.device_run(payload)
    except Exception:
        result = (f"Running {self.service.type} '{self.service.name}'"
                  " raised the following exception:\n"
                  f"{chr(10).join(format_exc().splitlines())}\n\n"
                  "Run aborted...")
        self.log("error", result)
        results = {
            "success": False,
            "runtime": self.runtime,
            "result": result
        }
    finally:
        # Finalization always runs: set final status, collect logs/state,
        # notify / push to git when configured, and persist the result.
        Session.commit()
        self.status = "Aborted" if self.stop else "Completed"
        self.run_state["status"] = self.status
        # A failure recorded earlier in the run takes precedence.
        if self.run_state["success"] is not False:
            self.run_state["success"] = results["success"]
        app.service_db[self.service.id]["runs"] -= 1
        results["endtime"] = self.endtime = app.get_time()
        results["logs"] = app.run_logs.pop(self.runtime, None)
        if self.parent_runtime == self.runtime:
            # Only the top-level run detaches the shared state from the registry.
            self.state = results["state"] = app.run_db.pop(
                self.parent_runtime)
        if self.task and not self.task.frequency:
            # One-shot task: deactivate it after this run.
            self.task.is_active = False
        results["properties"] = {
            "run": self.properties,
            "service": self.service.get_properties(exclude=["positions"]),
        }
        if self.send_notification:
            results = self.notify(results)
        if self.push_to_git:
            self.git_push(results)
        self.create_result(results)
        Session.commit()
    return results
def post(self) -> Union[str, dict]:
    """REST endpoint: run a job by name.

    Resolves device names, IP addresses and pool names to ids. Returns a
    plain error string if the request cannot be processed, an ``errors``
    dict on failed lookups, or the run outcome (sync) / runtime (async).
    """
    try:
        errors, data = [], request.get_json(force=True)
        devices, pools = [], []
        job = fetch("Job", name=data["name"])
        handle_asynchronously = data.get("async", False)
        for device_name in data.get("devices", ""):
            device = fetch("Device", name=device_name)
            if not device:
                errors.append(f"No device with the name '{device_name}'")
                continue
            devices.append(device.id)
        for device_ip in data.get("ip_addresses", ""):
            device = fetch("Device", ip_address=device_ip)
            if not device:
                errors.append(f"No device with the IP address '{device_ip}'")
                continue
            devices.append(device.id)
        for pool_name in data.get("pools", ""):
            pool = fetch("Pool", name=pool_name)
            if not pool:
                errors.append(f"No pool with the name '{pool_name}'")
                continue
            pools.append(pool.id)
        if errors:
            return {"errors": errors}
    except Exception as e:
        info(f"REST API run_job endpoint failed ({str(e)})")
        return str(e)
    if devices or pools:
        data.update({"devices": devices, "pools": pools})
    data["runtime"] = runtime = app.get_time()
    if not handle_asynchronously:
        return {**app.run(job.id, **data), "errors": errors}
    app.scheduler.add_job(
        id=runtime,
        func=app.run,
        run_date=datetime.now(),
        args=[job.id],
        kwargs=data,
        trigger="date",
    )
    return {"errors": errors, "runtime": runtime}
def post(self):
    """REST endpoint: run a service by name (rbac-checked).

    Resolves device names, IP addresses and pool names to ids, then either
    starts the run in a background thread (async) or executes it inline.
    """
    data = {
        "trigger": "REST",
        "creator": request.authorization["username"],
        **request.get_json(force=True),
    }
    errors, devices, pools = [], [], []
    service = db.fetch("service", name=data["name"], rbac="run")
    handle_asynchronously = data.get("async", False)
    for device_name in data.get("devices", ""):
        device = db.fetch("device", name=device_name)
        if not device:
            errors.append(f"No device with the name '{device_name}'")
            continue
        devices.append(device.id)
    for device_ip in data.get("ip_addresses", ""):
        device = db.fetch("device", ip_address=device_ip)
        if not device:
            errors.append(f"No device with the IP address '{device_ip}'")
            continue
        devices.append(device.id)
    for pool_name in data.get("pools", ""):
        pool = db.fetch("pool", name=pool_name)
        if not pool:
            errors.append(f"No pool with the name '{pool_name}'")
            continue
        pools.append(pool.id)
    if errors:
        return {"errors": errors}
    if devices or pools:
        data.update({"target_devices": devices, "target_pools": pools})
    data["runtime"] = runtime = app.get_time()
    if not handle_asynchronously:
        return {**app.run(service.id, **data), "errors": errors}
    Thread(target=app.run, args=(service.id,), kwargs=data).start()
    return {"errors": errors, "runtime": runtime}
def __init__(self, **kwargs):
    """Initialize a (possibly restarted) workflow run.

    Derives the runtime, the hierarchical ``path`` of service ids, and —
    when a restart path is set — the services to restart from.
    """
    self.runtime = kwargs.get("runtime") or app.get_time()
    super().__init__(**kwargs)
    if not kwargs.get("parent_runtime"):
        # Top-level run: it is its own parent and the path root.
        self.parent_runtime = self.runtime
        self.restart_path = kwargs.get("restart_path")
        self.path = str(self.service.id)
    else:
        # Nested run: extend the parent's path with this service's id.
        self.path = f"{self.parent.path}>{self.service.id}"
    # NOTE(review): ``self.original`` is presumably the top-level run whose
    # restart_path applies to the whole hierarchy — confirm against the model.
    restart_path = self.original.restart_path
    if restart_path:
        path_ids = restart_path.split(">")
        if str(self.service.id) in path_ids:
            workflow_index = path_ids.index(str(self.service.id))
            if workflow_index == len(path_ids) - 2:
                # This workflow directly contains the restart targets: the
                # final path segment is a dash-separated list of service ids.
                self.start_services = path_ids[-1].split("-")
            elif workflow_index < len(path_ids) - 2:
                # Deeper ancestor: restart from the next workflow on the path.
                self.start_services = [path_ids[workflow_index + 1]]
    if not self.start_services:
        # Default entry point: the workflow's "Start" service.
        # NOTE(review): assumes ``start_services`` has a falsy default on the
        # class/model when no restart path selected any — confirm.
        self.start_services = [fetch("service", scoped_name="Start").id]
def __init__(self, **kwargs):
    """Timestamp the entry and derive its label and color from the subtype."""
    subtype = kwargs["subtype"]
    self.name = app.get_time()
    self.label = subtype
    # Color comes from the class-level subtype-to-color mapping.
    self.color = self.color_mapping[subtype]
    super().__init__(**kwargs)
def __init__(self, **kwargs):
    """Set the runtime; a run without a parent is its own parent."""
    supplied_runtime = kwargs.get("runtime")
    self.runtime = supplied_runtime if supplied_runtime else app.get_time()
    super().__init__(**kwargs)
    if kwargs.get("parent_runtime"):
        return
    self.parent_runtime = self.runtime
def __init__(self, **kwargs):
    """Record creation time, derive the scheduler job id, and schedule if active."""
    super().update(**kwargs)
    self.creation_time = app.get_time()
    # The APScheduler job id defaults to the creation timestamp.
    if "aps_job_id" in kwargs:
        self.aps_job_id = kwargs["aps_job_id"]
    else:
        self.aps_job_id = self.creation_time
    if self.is_active:
        self.schedule()
def __init__(self, **kwargs: Any) -> None:
    """Assign the runtime before delegating; a parentless run is its own root."""
    supplied = kwargs.get("runtime")
    self.runtime = supplied or app.get_time()  # type: ignore
    if not kwargs.get("parent_runtime"):
        self.parent_runtime = self.runtime
    super().__init__(**kwargs)
def __init__(self, **kwargs: Any) -> None:
    """Record the creation time and fall back to it for the scheduler job id."""
    super().update(**kwargs)
    self.creation_time = app.get_time()  # type: ignore
    if "aps_job_id" in kwargs:
        self.aps_job_id = kwargs["aps_job_id"]
    else:
        self.aps_job_id = self.creation_time