async def update_target(id: str, item: Struct):
    target = Target(id)
    if not target.exists():
        raise HTTPException(status_code=404, detail="target not found")
    if not target.update(item):
        raise HTTPException(status_code=422, detail="error updating target")
    return parseJson(target)
async def create_target(data: Struct):
    ret = Target(data=data)
    # A freshly constructed object is always truthy; check the assigned ID
    # to verify the document was actually created.
    if ret.getID():
        return {"state": "true"}
    raise HTTPException(status_code=422, detail="can't create target")
async def get_all_targets():
    ret = Target().getAll()
    if len(ret) == 0:
        raise HTTPException(status_code=404, detail="no targets found")
    return parseJson(ret)
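# Usage sketch (an assumption, not from the source): these handlers follow the
# FastAPI pattern, so they would typically be registered on a router. The
# router name and route paths below are hypothetical.
from fastapi import APIRouter

router = APIRouter(prefix="/targets")
router.add_api_route("/", get_all_targets, methods=["GET"])
router.add_api_route("/", create_target, methods=["POST"])
router.add_api_route("/{id}", update_target, methods=["PUT"])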
@classmethod
def setUpClass(cls):
    cls.directory = Path(tempfile.gettempdir())
    target = Target(layers=[
        Layer("layer1",
              [Element.from_string("Li 1.0")],
              0.01, 0.01, start_depth=0.0),
        Layer("layer2",
              [Element.from_string("Li 0.048"),
               Element.from_string("O 0.649"),
               Element.from_string("Mn 0.303")],
              90.0, 4.0, start_depth=0.01),
        Layer("substrate",
              [Element.from_string("Si 1.0")],
              1000.0, 2.32, start_depth=90.01)
    ])
    cls.mcerd = MCERD(101, {
        "recoil_element": mo.get_recoil_element(),
        "sim_dir": tempfile.gettempdir(),
        "simulation_type": SimulationType.ERD,
        "target": target,
        "detector": mo.get_detector(),
        "beam": mo.get_beam(),
        # The following simulation parameters have been determined by the
        # rigorous application of the Stetson-Harrison method.
        "minimum_scattering_angle": 5.5,
        "minimum_main_scattering_angle": 6.5,
        "minimum_energy_of_ions": 8.15,
        "number_of_recoils": 15,
        "simulation_mode": SimulationMode.NARROW,
        "number_of_scaling_ions": 14,
        "number_of_ions_in_presimu": 100,
        "number_of_ions": 1000
    }, mo.get_element_simulation().get_full_name())
def test_serialization(self):
    t = Target(name="foo",
               modification_time=random.randint(0, 100),
               description="bar",
               target_type="AFM",
               image_size=(random.randint(0, 99), random.randint(0, 99)),
               image_file="test",
               target_theta=random.random(),
               scattering_element=mo.get_element(randomize=True),
               layers=[mo.get_layer()])
    with tempfile.TemporaryDirectory() as tmp_dir:
        tgt_file = Path(tmp_dir, ".target")
        t.to_file(tgt_file)
        t2 = Target.from_file(tgt_file, mo.get_request())

        self.assertIsNot(t, t2)
        self.assertEqual(t.name, t2.name)
        self.assertEqual(t.description, t2.description)
        self.assertEqual(t.layers[0].elements, t2.layers[0].elements)
        self.assertEqual(t.image_size, t2.image_size)
        self.assertEqual(t.target_theta, t2.target_theta)
        self.assertEqual(t.target_type, t2.target_type)
        self.assertEqual(t.scattering_element, t2.scattering_element)
def create(self, data):
    log.write("create job: " + str(data), "debug")
    if Job().getIdByName(data.name):
        log.write("error job already exists: " + str(data), "debug")
        return False
    if not Target(data.target_id).exists():
        log.write("error target does not exist: " + str(data.target_id), "debug")
        return False
    log.write("create job setup: " + str(data))
    doc = data.dict()
    if self.getDB().addDoc(doc):
        return True
    return False
async def create_job(data: Struct):
    if Job(name=data.name).exists():
        raise HTTPException(status_code=422, detail="job already exists")
    # Constructed objects are always truthy, so check exists() explicitly.
    for id in data.host_ids:
        if not Host(id).exists():
            raise HTTPException(status_code=422, detail="host does not exist")
    for id in data.task_ids:
        if not Task(id).exists():
            raise HTTPException(status_code=422, detail="task does not exist")
    if not Target(data.target_id).exists():
        raise HTTPException(status_code=422, detail="target does not exist")
    j = Job(data=data)
    if j.getID():
        return {"state": "true"}
    raise HTTPException(status_code=422, detail="can't create job")
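# A payload that would pass the validation in create_job above; the field
# names come from the handler, the values are hypothetical placeholders.
payload = {
    "name": "nightly-backup",
    "host_ids": ["<host-id>"],
    "task_ids": ["<task-id>"],
    "target_id": "<target-id>",
}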
async def search_for_target(filter: Struct):
    ret = Target().getAll(filter=shrinkJson(filter))
    return parseJson(ret)
def prepareTarget(self):
    self.target = Target(self.getTargetID())
raise SystemError


# ====== #
#  Main  #
# ====== #
if __name__ == "__main__":
    if connection.clientID != -1:
        # Now send some data to CoppeliaSim in a non-blocking fashion:
        print('Connected to remote API server!')
        sim.simxAddStatusbarMessage(connection.clientID,
                                    'Connected to remote API client!',
                                    sim.simx_opmode_oneshot)

        # ----- Initialization ----- #
        # Get Objects from Simulation
        sysCall_init()
        robot = Pioneer('Pioneer_p3dx')
        target = Target('GPS')
        scene = Scene()

        # ----- Threads (Tasks) ----- #
        # thread1 = Thread(target=RobotStatus, args=("Thread-1", robot))
        # thread1.start()
        # print("[Thread-1] 'RobotStatus' started!")

        # thread2 = Thread(target=TargetStatus, args=("Thread-2", target))
        # thread2.start()
        # print("[Thread-2] 'TargetStatus' started!")

        if doPlanning:
            thread3 = Thread(target=Planning, args=("Thread-3", robot, target, scene))
            thread3.start()
            print("[Thread-3] 'Planning' started!")
def getHost(self):
    # Lazily resolve the source host; the original returned self.target here.
    if not hasattr(self, "source"):
        self.source = Host(self.host_id)
    return self.source
class Backup():
    col_name = "backup"

    def __init__(self, id=None, job=None, source=None, target=None):
        self.filename = ""
        log.clearBuffer()
        if job is not None:
            self.setJob(job)
        if source is not None:
            self.setSource(source)
        if target is not None:
            self.setTarget(target)
        self.start_time = self.getTimestamp()
        if id is not None:
            self.get(id)

    def getID(self):
        if hasattr(self, "id"):
            return self.id
        return False

    def getAll(self, filter={}, type="object"):
        log.write("get all backups", "debug")
        docs = self.getDB().getCollection(query=filter)
        if type == "JSON":
            return docs
        ret = []
        for d in docs:
            ret.append(Backup(str(d["_id"])))
        return ret

    def get(self, id):
        log.write("load backup by id: " + id, "debug")
        ret = self.getDB().get(id)
        if ret:
            for k, v in ret.items():
                setattr(self, k, v)
            self.id = str(ret["_id"])
            del self._id
            self.exist = True
        else:
            return False

    def getDB(self):
        return DB(self.col_name)

    def toJSON(self):
        return json.dumps(self, default=lambda o: o.__dict__,
                          sort_keys=True, indent=4)

    def setJob(self, job):
        self.job = job

    def getJob(self):
        if not hasattr(self, "job"):
            self.job = Job(self.job_id)
        return self.job

    def setTarget(self, target):
        self.target = target

    def getTarget(self):
        if not hasattr(self, "target"):
            self.target = Target(self.target_id)
        return self.target

    def setSource(self, source):
        self.source = source

    def getSource(self):
        if not hasattr(self, "source"):
            self.source = Host(self.host_id)
        return self.source

    def getHost(self):
        # Alias for getSource(); the host is the backup source.
        return self.getSource()

    def getTimestamp(self):
        now = datetime.datetime.now()
        return now.strftime('%Y-%m-%d_%H-%M-%S')

    def getNewBackupPath(self):
        return self.getBackupRoot() + "/" + self.start_time + "-" + self.task.getName() + ".tar.gz"

    def getContainerBackupPath(self, name):
        return self.getBackupRoot() + "/" + self.start_time + "-" + name + ".tar.gz"

    def getStackBackupPath(self, stack, name):
        return self.getBackupRootShort() + "/" + stack + "/" + self.start_time + "-" + name + ".tar.gz"

    def run(self, task):
        self.task = task
        log.write("run task: " + str(self.task.getID()))
        self.prepare(task)
        if self.task.getType() == "file":
            log.write("init file backup", "notice")
            self.filename = self.getNewBackupPath()
            self.backupFile()
            return True
        elif self.task.getType() == "docker":
            log.write("init docker backup", "notice")
            c_ids = []
            if self.task.getData("container"):
                for container in self.task.getData("container"):
                    log.write("fetch ids for container name: " + container, "debug")
                    c_ids = self.source.getContainersByName(container)
                    if len(c_ids) == 0:
                        log.write("no matching containers found", "debug")
                    else:
                        for id in c_ids:
                            self.filename = self.getContainerBackupPath(container)
                            self.backupDockerContainer(id, container)
            if self.task.getData("stacks"):
                for stack in self.task.getData("stacks"):
                    log.write("fetch containers for stack name: " + stack, "debug")
                    c_names = self.source.getContainersByStack(stack)
                    if len(c_names) == 0:
                        log.write("no matching containers found", "debug")
                    else:
                        for container in c_names:
                            self.filename = self.getStackBackupPath(stack, container)
                            for id in self.source.getContainersByName(container):
                                self.backupDockerContainer(id, container, stack)
            self.purgeOld()
            log.write("finish docker backup", "debug")
            # self.target.openFile(filename)
        else:
            log.write("error unsupported task type: " + task["type"])

    def backupFile(self):
        if not self.target.fileExists(self.getBackupRoot()):
            log.write("backup root does not exist. creating: " + self.getBackupRoot())
            self.target.createDirectoryRecursive(self.getBackupRoot())
        self.target.openFile(self.filename)
        self.source.createArchiveFromPaths(self.task.getData())
        # Stream the archive from the source to the target in chunks.
        while True:
            data = self.source.readBinary()
            if not data:
                break
            self.target.writeFile(data)
        log.write("finish backup")
        self.addBackupEntry()
        self.target.closeFile()

    def backupDockerContainer(self, id, container, stack=None):
        log.write("start backup of container %s to %s" % (id, self.filename))
        if not self.target.fileExists(os.path.dirname(self.getFilename())):
            log.write("backup root does not exist. creating: " + os.path.dirname(self.getFilename()))
            self.target.createDirectoryRecursive(os.path.dirname(self.getFilename()))
        self.target.openFile(self.getFilename())
        self.source.createArchiveFromContainerId(id)
        while True:
            data = self.source.readBinary()
            if not data:
                break
            self.target.writeFile(data)
        self.target.closeFile()
        log.write("finish backup of container %s" % (id))
        logs = {"container": container}
        if stack is not None:
            logs["stack"] = stack
        self.addBackupEntry(logs)
        return True

    def exists(self):
        if hasattr(self, "exist"):
            return self.exist
        return False

    def getFilename(self):
        if hasattr(self, "filename"):
            return self.filename
        return False

    def getSnapshots(self):
        log.write("get snapshots")
        filter = {
            "task_id": self.getTask().getID(),
            "host_id": self.getHost().conf.getID(),
            "target_id": self.getTarget().conf.getID()
        }
        ret = self.getDB().find(filter=filter).sort("start_time", 1)
        log.write("found %i snapshots" % (ret.count()))
        return ret

    def deleteOldestSnapshot(self):
        id = self.getSnapshots()[0]["_id"]
        return Backup(id).delete()

    def purgeOld(self):
        log.write("purge old backups", "info")
        while self.getSnapshots().count() > self.getJob().getMaxSnapshotCount():
            self.deleteOldestSnapshot()
        return True

    def addBackupEntry(self, data={}):
        log.write("mark backup as completed", "debug")
        self.end_time = self.getTimestamp()
        doc = {"job_id": str(self.job.getID()),
               "filename": self.filename,
               "task_id": self.task.getID(),
               "host_id": self.source.conf.getID(),
               "target_id": self.target.conf.getID(),
               "type": self.task.getType(),
               "hostname": self.source.conf.getHostname(),
               "log": log.getBuffer(),
               "start_time": self.start_time,
               "end_time": self.end_time}
        self.getDB().addDoc(doc)

    def restore(self, overwrite=True, replace=False):
        if self.task.getType() == "file":
            log.write("restore backup " + self.getID(), "notice")
            self.getTarget().prepare()
            self.getTarget().getConnection().openFile(self.getFilename(), "rb")
            self.getSource().prepare()
            self.getSource().getConnection().restoreArchive()
            while True:
                data = self.getTarget().getConnection().readBinary()
                if not data:
                    break
                self.getSource().getConnection().writeBinary(data)
            self.getSource().getConnection().closeFile()
            self.getSource().getConnection().syncDirectory()
            log.write("restored backup " + self.getID())
        else:
            log.write("error: restore not supported for this type", "error")
            return False

    def getBackupRootShort(self):
        return "/backup/" + str(self.job.getID()) + "/" + self.task.getID() + "/"

    def getBackupRootStack(self, stack):
        return "/backup/" + str(self.job.getID()) + "/" + self.task.getID() + "/" + stack + "/"

    def getBackupRoot(self):
        return "/backup/" + str(self.job.getID()) + "/" + self.task.getID() + "/" + self.source.conf.getHostname() + "/"

    def getTask(self):
        if hasattr(self, "task"):
            return self.task
        return False

    def prepare(self, task):
        self.task = task

    def delete(self):
        if not self.exists():
            return True
        log.write("delete backup " + str(self.getID()), "debug")
        self.getTarget().prepare()
        self.getTarget().getConnection().deleteFile(self.getFilename())
        return self.getDB().deleteById(self.getID())
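# Minimal usage sketch for Backup, assuming job, source_host, target_storage
# and task are already wired up as the class expects (all names hypothetical):
backup = Backup(job=job, source=source_host, target=target_storage)
backup.run(task)  # "file" tasks stream one archive; "docker" tasks iterate containers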
def get_target() -> Target:
    """Returns a default Target object."""
    return Target()
def getHost(self):
    # Lazily resolve the host; the original returned self.target and ended
    # with an unreachable 'return ts'.
    if not hasattr(self, "host"):
        self.host = Host(self.host_id)
    return self.host
class Dump():
    col_name = "dump"
    cmd = {
        "mongodb": {
            "dump": "mongodump -o /dumps",
            "cred_format": "--uri='%s'",
            "db_format": "--db="
        },
        "mysql": {
            "dump": "mysqldump --comments --routines ",
            "dump_all": 'mkdir /dumps; cd /dumps; mysql -N -e "show databases" | grep -vE "^(mysql|performance_schema|information_schema)$" | while read dbname; do mysqldump --complete-insert --routines --triggers --single-transaction "$dbname" | gzip > "$dbname".sql.gz; done',
            "db_format": "%s"
        }
    }

    def __init__(self, id=None, job=None, source=None, target=None):
        self.filename = ""
        log.clearBuffer()
        if job is not None:
            self.setJob(job)
        if source is not None:
            self.setSource(source)
        if target is not None:
            self.setTarget(target)
        self.start_time = self.getTimestamp()
        if id is not None:
            self.get(id)

    def get(self, id):
        log.write("load dump by id: " + id, "debug")
        ret = self.getDB().get(id)
        if ret:
            for k, v in ret.items():
                setattr(self, k, v)
            self.id = str(ret["_id"])
            del self._id
            self.exist = True
        else:
            return False

    def getAll(self, filter={}, type="object"):
        log.write("get all dumps", "debug")
        docs = self.getDB().getCollection(query=filter)
        if type == "JSON":
            return docs
        ret = []
        for d in docs:
            ret.append(Dump(str(d["_id"])))
        return ret

    def getCmd(self):
        type = self.task.getType()
        if type:
            return self.cmd[type]
        return False

    def getTimestamp(self):
        now = datetime.datetime.now()
        return now.strftime('%Y-%m-%d_%H-%M-%S')

    def getDB(self):
        return DB(self.col_name)

    def exists(self):
        if hasattr(self, "exist"):
            return self.exist
        return False

    def getFilename(self):
        if hasattr(self, "filename"):
            return self.filename
        return False

    def getSnapshots(self):
        log.write("get snapshots")
        filter = {
            "task_id": self.getTask().getID(),
            "host_id": self.getHost().getID(),
            "target_id": self.getTarget().getID()
        }
        ret = self.getDB().find(filter=filter).sort("start_time", 1)
        log.write("found %i snapshots" % (ret.count()))
        return ret

    def deleteOldestSnapshot(self):
        id = self.getSnapshots()[0]["_id"]
        return Dump(id).delete()

    def purgeOld(self):
        log.write("purge old dumps", "info")
        while self.getSnapshots().count() > self.getJob().getMaxSnapshotCount():
            self.deleteOldestSnapshot()
        return True

    def toJSON(self):
        return json.dumps(self, default=lambda o: o.__dict__,
                          sort_keys=True, indent=4)

    def getID(self):
        if hasattr(self, "id"):
            return self.id
        return False

    def setJob(self, job):
        self.job = job

    def getJob(self):
        if not hasattr(self, "job"):
            self.job = Job(self.job_id)
        return self.job

    def setTarget(self, target):
        self.target = target

    def getTarget(self):
        if not hasattr(self, "target"):
            self.target = Target(self.target_id)
        return self.target

    def setSource(self, source):
        self.source = source

    def getSource(self):
        if not hasattr(self, "source"):
            self.source = Host(self.host_id)
        return self.source

    def getHost(self):
        # Lazily resolve the host (the original returned self.target and
        # contained an unreachable 'return ts').
        if not hasattr(self, "host"):
            self.host = Host(self.host_id)
        return self.host

    def getType(self):
        if hasattr(self, "type"):
            return self.type
        return False

    def getTask(self):
        if hasattr(self, "task"):
            return self.task
        return False

    def prepare(self, task):
        self.task = task

    def delete(self):
        if not self.exists():
            return True
        log.write("delete dump " + str(self.getID()), "debug")
        self.getTarget().prepare()
        self.getTarget().getConnection().deleteFile(self.getFilename())
        return self.getDB().deleteById(self.getID())

    def run(self, task):
        self.task = task
        log.write("run task: " + str(self.task.getID()))
        self.prepare(task)
        if self.task.getData("container"):
            for container in self.task.getData("container"):
                self.c = self.task.getData("container")[container]
                log.write("fetch ids for container name: " + container, "debug")
                c_ids = self.source.getContainersByName(container)
                if len(c_ids) == 0:
                    log.write("no matching containers found", "debug")
                else:
                    for c_id in c_ids:
                        self.container_id = c_id
                        if "db" in self.c:
                            # Normalize a single database name to a list.
                            if type(self.c["db"]) is str:
                                self.c["db"] = [self.c["db"]]
                            for db in self.c["db"]:
                                self.container = container
                                self.filename = self.getDumpFilename(db=db, container=self.container)
                                self.backupDB(db=db, container_id=c_id)
                        else:
                            self.container = container
                            self.filename = self.getDumpFilename(container=self.container)
                            self.backupDB(container_id=c_id)
        if self.task.getData("stacks"):
            for stack in self.task.getData("stacks"):
                self.c = self.task.getData("stacks")[stack]
                log.write("fetch containers for stack name: " + stack, "debug")
                c_ids = self.source.getContainersByStack(stack)
                if len(c_ids) == 0:
                    log.write("no matching containers found", "debug")
                else:
                    for c_id in c_ids:
                        if "db" in self.c:
                            if type(self.c["db"]) is str:
                                self.c["db"] = [self.c["db"]]
                            for db in self.c["db"]:
                                self.filename = self.getDumpFilename(db=db, stack=stack)
                                self.backupDB(db=db, container_id=c_id)
                        else:
                            self.filename = self.getDumpFilename(stack=stack)
                            self.backupDB(container_id=c_id)

    def getDumpFilename(self, db=None, container=None, stack=None):
        base = "/dumps/" + str(self.job.getID()) + "/" + self.task.getID() + "/"
        if stack is not None:
            base += stack + "/"
        if container is not None:
            base += container + "/"
        if db is not None:
            filename = self.getTimestamp() + "_" + db
        else:
            filename = self.getTimestamp() + "_all_databases"
        if self.task.getType() == "mongodb":
            filename += ".tar"
        elif self.task.getType() == "mysql":
            filename += ".tar"
        return base + filename

    def backupDB(self, db=None, container_id=None):
        state = False
        backup_root = os.path.dirname(self.getFilename())
        if not self.target.fileExists(backup_root):
            log.write("dump root does not exist. creating: " + backup_root)
            self.target.createDirectoryRecursive(backup_root)
        cmd = self.getCmd()["dump"]
        if "port" in self.c:
            cmd += " --port " + str(self.c["port"])
        if db is not None:
            cmd += " " + self.getCmd()["db_format"] + db
        elif "dump_all" in self.getCmd():
            cmd = self.getCmd()["dump_all"]
        if "gzip" in self.c:
            cmd += " | gzip"
        self.target.openFile(self.filename)
        if container_id is not None:
            self.source.execCommandDocker(container_id, cmd, wait=True)
        else:
            self.source.execCommand(cmd, wait=True)
        if self.task.getType() in ["mongodb", "mysql"]:
            data = self.source.read()
            if container_id is not None:
                self.source.execCommandDocker(container_id, "tar -Oc /dumps 2>/dev/null | cat")
            else:
                self.source.execCommand("tar -Oc /dumps 2>/dev/null | cat")
        # Stream the dump to the target; mark state only once data arrives.
        while True:
            data = self.source.readBinary()
            if not data:
                break
            state = True
            self.target.writeFile(data)
        if self.task.getType() in ["mongodb", "mysql"]:
            if container_id is not None:
                self.source.execCommandDocker(container_id, "rm -rf /dumps")
            else:
                self.source.execCommand("rm -rf /dumps")
        self.target.closeFile()
        if state == False:
            log.write("error: no data received", "error")
            self.getTarget().getConnection().deleteFile(self.getFilename())
        else:
            self.addDumpEntry()
            log.write("finish dump")

    def addDumpEntry(self, data={}):
        log.write("mark dump as completed", "debug")
        self.end_time = self.getTimestamp()
        doc = {
            "job_id": str(self.job.getID()),
            "filename": self.filename,
            "task_id": self.task.getID(),
            "host_id": self.source.conf.getID(),
            "target_id": self.target.conf.getID(),
            "type": self.task.getType(),
            "hostname": self.source.conf.getHostname(),
            "log": log.getBuffer(),
            "start_time": self.start_time,
            "end_time": self.end_time
        }
        self.getDB().addDoc(doc)
async def delete_target(id: str):
    ret = Target(id).delete()
    if ret is False:
        raise HTTPException(status_code=404, detail="target not found")
    return parseJson(ret)
def getTarget(self):
    # Lazily resolve and cache the target on first access.
    if not hasattr(self, "target"):
        self.target = Target(self.target_id)
    return self.target
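# The lazy getters above cache the looked-up object on first access. A minimal
# sketch of the same idea using functools.cached_property (class name
# hypothetical, assuming a Target lookup by target_id as elsewhere):
from functools import cached_property

class TargetOwner:
    def __init__(self, target_id):
        self.target_id = target_id

    @cached_property
    def target(self):
        # Resolved once on first access, then stored on the instance.
        return Target(self.target_id)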