def openFile(self, path, mode="wb"):
    abs_path = self.path + "/" + path
    log.write("open target file: " + abs_path, "debug")
    if self.fileExists(abs_path):
        log.write("cannot create backup, file exists: " + abs_path, "error")
        return False
    self.fh = self.fs.open(abs_path, mode)
def purgeOld(self):
    log.write("purge old dumps", "info")
    while self.getSnapshots().count() > self.getJob().getMaxSnapshotCount():
        self.deleteOldestSnapshot()
    return True
def deleteFile(self, path):
    abs_path = self.path + "/" + path
    if not self.fileExists(path):
        return False
    log.write("delete file: " + path)
    return self.fs.remove(abs_path)
def execJob(self, job):
    log.write("exec job " + job.id, "notice")
    job.setStatus("started")
    job.prepare()
    job.getTarget().prepare()
    for host in job.getHosts():
        log.write("process host " + host.getID(), "notice")
        if host.prepare():
            for task in job.getTasks():
                if job.getType() == "backup":
                    log.write("start task " + task.getName(), "debug")
                    backup = Backup(job=job,
                                    source=host.getConnection(),
                                    target=job.getTarget().getConnection())
                    backup.run(task)
                if job.getType() == "dump":
                    log.write("start dump " + task.getName(), "debug")
                    dump = Dump(job=job,
                                source=host.getConnection(),
                                target=job.getTarget().getConnection())
                    dump.run(task)
        else:
            log.write("error: prepare host: " + host.getID(), "error")
    job.setLastRun()
def delete(self):
    if not self.exists():
        return True
    log.write("delete backup " + str(self.getID()), "debug")
    self.getTarget().prepare()
    self.getTarget().getConnection().deleteFile(self.getFilename())
    return self.getDB().deleteById(self.getID())
def getContainersByName(self, name):
    cmd = 'docker ps -q --filter "name=' + name + '"'
    log.write("execute command: " + cmd, "debug")
    stdin, stdout, ssh_stderr = self.client.exec_command(cmd)
    ids = str(stdout.read(), 'ascii').splitlines()
    return ids
def run(self, task):
    self.task = task
    log.write("run task: " + str(self.task.getID()))
    self.prepare(task)
    if self.task.getData("container"):
        for container in self.task.getData("container"):
            self.c = self.task.getData("container")[container]
            log.write("fetch ids for container name: " + container, "debug")
            c_ids = self.source.getContainersByName(container)
            if len(c_ids) == 0:
                log.write("no matching containers found", "debug")
            else:
                for c_id in c_ids:
                    self.container_id = c_id
                    if "db" in self.c:
                        if isinstance(self.c["db"], str):
                            self.c["db"] = [self.c["db"]]
                        for db in self.c["db"]:
                            self.container = container
                            self.filename = self.getDumpFilename(
                                db=db, container=self.container)
                            self.backupDB(db=db, container_id=c_id)
                    else:
                        self.container = container
                        self.filename = self.getDumpFilename(
                            container=self.container)
                        self.backupDB(container_id=c_id)
    if self.task.getData("stacks"):
        for stack in self.task.getData("stacks"):
            self.c = self.task.getData("stacks")[stack]
            log.write("fetch containers for stack name: " + stack, "debug")
            c_ids = self.source.getContainersByStack(stack)
            if len(c_ids) == 0:
                log.write("no matching containers found", "debug")
            else:
                for c_id in c_ids:
                    if "db" in self.c:
                        if isinstance(self.c["db"], str):
                            self.c["db"] = [self.c["db"]]
                        for db in self.c["db"]:
                            self.filename = self.getDumpFilename(db=db, stack=stack)
                            self.backupDB(db=db, container_id=c_id)
                    else:
                        self.filename = self.getDumpFilename(stack=stack)
                        self.backupDB(container_id=c_id)
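# Hedged illustration of the task data that run() consumes (an assumption pieced
# together from the lookups above, not a structure defined in this excerpt):
#
#     {
#         "container": {
#             "wordpress": {"db": "wordpress", "gzip": True},
#         },
#         "stacks": {
#             "shop": {"db": ["orders", "customers"], "port": 3307},
#         },
#     }
#
# "db" may be a plain string or a list of names; entries without "db" fall back to
# the dump_all command in backupDB(). The concrete names and values are placeholders.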
def execCommand(self, cmd, wait=True):
    if not self.isConnected():
        self.connect()
    log.write("execute command: " + cmd, "debug")
    self.transport = self.client.get_transport()
    self.channel = self.transport.open_session()
    self.channel.exec_command(cmd)
    if wait:
        self.channel.recv_exit_status()
def restoreArchive(self):
    self.tmp_path = "/tmp/ares-tmp/"
    cmd = "tar -C " + self.tmp_path + " -xz "
    if not self.fileExists(self.tmp_path):
        self.createDirectoryRecursive(self.tmp_path)
    log.write("execute command: " + cmd, "debug")
    # the archive data is presumably streamed into self.stdin by the caller
    self.stdin, self.stdout, self.stderr = self.client.exec_command(cmd)
def setCredential(self, credential):
    if not credential.getType():
        log.write("error no type specified for credential", "error")
    if credential.getType() != "password":
        log.write("error cifs only supports password auth", "error")
        return False
    self.secret_type = credential.getType()
    self.secret = credential.getSecret()
    self.username = credential.getUsername()
def get(self, id: str):
    log.write("load host by id: " + id, "debug")
    ret = self.getDB().get(id)
    if ret:
        for k, v in ret.items():
            setattr(self, k, v)
        self.id = str(ret["_id"])
        del self._id
        self.exist = True
    else:
        return False
def getAll(self, filter={}, type="object"):
    log.write("get all hosts", "debug")
    docs = self.getDB().getCollection(query=filter)
    if type == "JSON":
        return docs
    else:
        ret = []
        for d in docs:
            r = Host(str(d["_id"]))
            ret.append(r)
        return ret
def getAll(self, filter={}, type="object"):
    log.write("get all jobs", "debug")
    docs = self.getDB().getCollection(filter)
    if type == "JSON":
        return docs
    else:
        ret = []
        for d in docs:
            c_job = Job(str(d["_id"]))
            ret.append(c_job)
        return ret
def update(self, data):
    if not self.exists():
        return False
    value = dict()
    for k, v in vars(data).items():
        if v is not None:
            value[k] = v
    log.write("update host %s (%s)" % (self.getID(), value))
    update = self.getDB().updateDocByID(self.id, value)
    self.get(self.getID())
    return True
def addBackupEntry(self, data={}):
    log.write("mark backup as completed", "debug")
    self.end_time = self.getTimestamp()
    doc = {
        "job_id": str(self.job.getID()),
        "filename": self.filename,
        "task_id": self.task.getID(),
        "host_id": self.source.conf.getID(),
        "target_id": self.target.conf.getID(),
        "type": self.task.getType(),
        "hostname": self.source.conf.getHostname(),
        "log": log.getBuffer(),
        "start_time": self.start_time,
        "end_time": self.end_time,
    }
    # merge any extra metadata passed by the caller (e.g. container/stack)
    doc.update(data)
    self.getDB().addDoc(doc)
def getSnapshots(self):
    log.write("get snapshots")
    filter = {
        "task_id": self.getTask().getID(),
        "host_id": self.getHost().conf.getID(),
        "target_id": self.getTarget().conf.getID(),
    }
    ret = self.getDB().find(filter=filter).sort("start_time", 1)
    log.write("found %i snapshots" % (ret.count()))
    return ret
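# Note on the count above (assumption about the storage layer): if getDB().find()
# hands back a raw PyMongo cursor, Cursor.count() only exists up to PyMongo 3.x and
# was removed in PyMongo 4. A forward-compatible sketch would count on the
# collection instead, e.g.:
#
#     count = self.getDB().count_documents(filter)
#
# whether getDB() exposes count_documents() directly is an assumption here.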
def execCommandDocker(self, container, cmd, wait=True):
    if not self.isConnected():
        self.connect()
    # note: cmd is wrapped in single quotes, so it must not contain single quotes itself
    c = "/usr/bin/docker exec -t %s bash -c '%s' | tee -a /tmp/ares.out" % (
        container, cmd)
    log.write("execute command '%s' on container '%s'" % (cmd, container), "debug")
    self.transport = self.client.get_transport()
    self.channel = self.transport.open_session()
    self.channel.exec_command(c)
    if wait:
        self.channel.recv_exit_status()
def syncDirectory(self, source=None, target="/", delete=False):
    if source is None:
        source = self.tmp_path
    cmd = "rsync -a " + source + " " + target
    if delete:
        cmd += " --delete"
    log.write("execute command: " + cmd)
    stdin, stdout, ssh_stderr = self.client.exec_command(cmd)
    self.removeDirectoryRecursive(source)
    return True
def createArchiveFromPaths(self, path):
    # the archive is written to stdout so it can be streamed back over the SSH channel
    cmd = "tar -Ocz "
    p = ""
    for s in path:
        p += s + "/ "
    cmd += p
    if not self.isConnected():
        self.connect()
    log.write("execute command: " + cmd, "debug")
    self.transport = self.client.get_transport()
    self.channel = self.transport.open_session()
    self.channel.exec_command(cmd + " 2>/dev/null")
def backupFile(self):
    if not self.target.fileExists(self.getBackupRoot()):
        log.write("backup root does not exist. creating: " + self.getBackupRoot())
        self.target.createDirectoryRecursive(self.getBackupRoot())
    self.target.openFile(self.filename)
    self.source.createArchiveFromPaths(self.task.getData())
    while True:
        data = self.source.readBinary()
        if not data:
            break
        self.target.writeFile(data)
    log.write("finish backup")
    self.addBackupEntry()
    self.target.closeFile()
def backupDockerContainer(self, id, container, stack=None):
    log.write("start backup of container %s to %s" % (id, self.filename))
    if not self.target.fileExists(os.path.dirname(self.getFilename())):
        log.write("backup root does not exist. creating: " +
                  os.path.dirname(self.getFilename()))
        self.target.createDirectoryRecursive(os.path.dirname(self.getFilename()))
    self.target.openFile(self.getFilename())
    self.source.createArchiveFromContainerId(id)
    while True:
        data = self.source.readBinary()
        if not data:
            break
        self.target.writeFile(data)
    self.target.closeFile()
    log.write("finish backup of container %s" % (id))
    logs = {"container": container}
    if stack is not None:
        logs["stack"] = stack
    self.addBackupEntry(logs)
    return True
def create(self, data):
    log.write("create task: " + str(data), "debug")
    if Task(name=data.name).exists():
        log.write("error task already exists: " + str(data), "debug")
        return False
    if data["type"] not in self.supported_types:
        log.write("error task type not found: " + str(data["type"]), "debug")
        return False
    if "id" in data:
        del data["id"]
    log.write("create task: " + str(data))
    doc = data.dict()
    return self.getDB().addDoc(doc)
def backupDB(self, db=None, container_id=None):
    state = False
    backup_root = os.path.dirname(self.getFilename())
    if not self.target.fileExists(backup_root):
        log.write("dump root does not exist. creating: " + backup_root)
        self.target.createDirectoryRecursive(backup_root)
    cmd = self.getCmd()["dump"]
    if "port" in self.c:
        cmd += " --port " + str(self.c["port"])
    if db is not None:
        cmd += " " + self.getCmd()["db_format"] + db
    elif "dump_all" in self.getCmd():
        cmd = self.getCmd()["dump_all"]
    if "gzip" in self.c:
        cmd += " | gzip"
    self.target.openFile(self.filename)
    if container_id is not None:
        self.source.execCommandDocker(container_id, cmd, wait=True)
    else:
        self.source.execCommand(cmd, wait=True)
    if self.task.getType() in ["mongodb", "mysql"]:
        data = self.source.read()
        if container_id is not None:
            self.source.execCommandDocker(
                container_id, "tar -Oc /dumps 2>/dev/null | cat")
        else:
            self.source.execCommand("tar -Oc /dumps 2>/dev/null | cat")
    while True:
        data = self.source.readBinary()
        if not data:
            break
        # only mark the dump as successful once actual data has been received
        state = True
        self.target.writeFile(data)
    if self.task.getType() in ["mongodb", "mysql"]:
        if container_id is not None:
            self.source.execCommandDocker(container_id, "rm -rf /dumps")
        else:
            self.source.execCommand("rm -rf /dumps")
    self.target.closeFile()
    if not state:
        log.write("error: no data received", "error")
        self.getTarget().getConnection().deleteFile(self.getFilename())
    else:
        self.addDumpEntry()
    log.write("finish dump")
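# Hedged illustration (not defined in this excerpt): backupDB() above only relies on
# getCmd() returning a mapping with the keys "dump", "db_format" and, optionally,
# "dump_all". For a MySQL task that mapping could look roughly like:
#
#     {
#         "dump": "mysqldump",
#         "db_format": "--databases ",
#         "dump_all": "mysqldump --all-databases",
#     }
#
# The concrete commands and flags are placeholders; only the key names are taken
# from the code above.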
def request(self):
    log.write("request new job", "notice")
    jobs = self.getAll()
    for job in jobs:
        if datetime.datetime.now() > job.getNextRun():
            log.write("found available job: " + job.getID(), "debug")
            return job
    log.write("found no jobs", "debug")
    return False
def create(self, data):
    log.write("create job: " + str(data), "debug")
    if Job().getIdByName(data.name):
        log.write("error job already exists: " + str(data), "debug")
        return False
    if not Target(data.target_id).exists():
        log.write("error target does not exist: " + str(data.target_id), "debug")
        return False
    log.write("create job: " + str(data))
    doc = data.dict()
    if self.getDB().addDoc(doc):
        return True
    else:
        return False
def create(self, data):
    log.write("create secret: " + str(data), "debug")
    if Secret(name=data.name).exists():
        log.write("error secret already exists: " + str(data), "debug")
        return False
    log.write("create secret: " + str(data))
    doc = data.dict()
    return self.getDB().addDoc(doc)
def connect(self):
    log.write("establish connection", "debug")
    if self.secret_type == "password":
        # the smb:// opener is provided by the fs.smbfs extension for PyFilesystem2
        self.fs = fs.open_fs(
            'smb://%s:%s@%s/%s' % (self.username, self.secret,
                                   self.host, self.conf.location))
        if not self.fs.exists(self.conf.path):
            log.write("create backup dir: " + self.conf.path, "info")
            self.fs.makedir(self.conf.path)
    else:
        log.write("unsupported secret type", "error")
def create(self, data):
    log.write("create host: " + str(data), "debug")
    if Host(hostname=data.hostname).exists():
        log.write("error host already exists: " + str(data), "debug")
        return False
    log.write("create host: " + str(data))
    doc = data.dict()
    return self.getDB().addDoc(doc)
def connect(self):
    log.write("try to connect to host: " + self.host, "notice")
    if self.secret_type == "password":
        self.client.connect(self.host, username=self.username,
                            password=self.secret)
        log.write("connected by password", "debug")
        return True
    elif self.secret_type == "certificate":
        self.client.connect(self.host, username=self.username,
                            key_filename="/id_rsa")
        log.write("connected by certificate", "debug")
        return True
    else:
        log.write("secret type not supported by host_linux: " +
                  self.secret_type, "error")
    # except:
    #     log.write("host not reachable: " + self.host)
    return False
def runJob(self):
    job = Job().request()
    if not job:
        return False
    log.write("found job: " + job.getID() + " " + job.getName(), "notice")
    ret = job.apply()
    if ret == False:
        log.write("error: apply as worker " + job.getWorker(), "notice")
        return False
    else:
        log.write("apply as worker " + job.getWorker(), "notice")
        return self.execJob(job)
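# Hedged usage sketch: the surrounding scheduler/daemon is not part of this excerpt.
# One simple way to drive runJob() would be to poll it from a worker loop, e.g.:
#
#     import time
#     worker = Worker()      # assumption: runJob() is defined on a Worker class
#     while True:
#         worker.runJob()    # picks up and executes the next due job, if any
#         time.sleep(60)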