Example #1
async def get_job(id: str):
    job = Job(id)

    if not job.exists():
        raise HTTPException(status_code=404, detail="job not found")
    return parseJson(job)
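
These snippets follow the FastAPI handler pattern, with the route decorators omitted. A minimal sketch of how such a handler is typically registered, assuming FastAPI; the route path is hypothetical and Job/parseJson are the project helpers used throughout these examples:

from fastapi import FastAPI, HTTPException

app = FastAPI()

@app.get("/jobs/{id}")            # hypothetical route path
async def get_job(id: str):
    job = Job(id)                 # Job/parseJson: assumed project helpers
    if not job.exists():
        raise HTTPException(status_code=404, detail="job not found")
    return parseJson(job)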
Example #2
async def run_job(id: str):
    job = Job(id)
    if not job.exists():
        raise HTTPException(status_code=404,
                            detail="job not found or already claimed")
    worker = Worker()
    worker.execJob(job)
    return {"status": "success"}
Example #3
async def update_job(id: str, item: Struct):
    job = Job(id)

    if not job.exists():
        raise HTTPException(status_code=404, detail="job not found")

    if not job.update(item):
        raise HTTPException(status_code=422, detail="error updating job")

    return parseJson(job)
Example #4
    def __init__(self, control_app, app, ui, logging_queue):
        super(ServiceManager, self).__init__()
        # global LOGGER
        # LOGGER = setup_queued_logger(__name__, logging_queue)

        self.control_app, self.app, self.ui = control_app, app, ui

        self.job_working_queue = list()
        self.job_queue = list()
        self.empty_job = Job(_('Kein Job'), '', get_user_directory(),
                             'mayaSoftware')
        self.current_job = self.empty_job

        # Control app signals
        self.start_job_signal.connect(self.control_app.add_render_job)
        self.job_widget_signal.connect(self.control_app.update_job_widget)
        self.abort_running_job_signal.connect(
            self.control_app.abort_running_job)
        self.force_psd_creation_signal.connect(
            self.control_app.watcher_force_psd_creation)

        # Run service manager socket server
        self.server = None
        self.address = ('', 0)
        self.hostname = ''

        # Timers must be created inside the thread's event loop
        self.alive_led_timer = None
        self.validate_queue_timer = None
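
The deferred timers reflect a Qt constraint: a QTimer must be created, and its start() called, in the thread whose event loop will drive it. A minimal sketch of that pattern, assuming a PyQt5-style QThread (the binding and interval are assumptions):

from PyQt5.QtCore import QThread, QTimer

class TimerOwningThread(QThread):
    def run(self):
        # Create timers here so they belong to this thread's event loop
        self.alive_led_timer = QTimer()
        self.alive_led_timer.setInterval(1000)   # assumed interval
        self.alive_led_timer.start()
        self.exec_()   # run this thread's event loop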
Example #5
async def apply_for_job(id: str):
    ret = Job(id).apply()
    if not ret:
        raise HTTPException(status_code=404,
                            detail="job not found or already claimed")
    return parseJson(ret)
Example #6
    def __init__(self, app, ui, logging_queue):
        """

        :param modules.main_app.PfadAeffchenApp app:
        :param ui:
        :param logging_queue:
        """
        super(ControlApp, self).__init__(ui=ui)
        global LOGGER
        LOGGER = setup_queued_logger(__name__, logging_queue)
        self.app, self.ui, self.logging_queue = app, ui, logging_queue

        self.scene_file = None
        self.render_path = None
        self.job_aborted = False
        self.mod_dir = self.app.mod_dir
        self.job_finished_timer.timeout.connect(self.job_finished)

        # Initialise Main Window
        self.ui.actionToggleWatcher.toggled.connect(self.toggle_watcher_window)
        self.ui.enableQueue.toggled.connect(self.start_queue)
        self.ui.startRenderService.toggled.connect(self.toggle_render_service)
        self.ui.actionReport.triggered.connect(self.save_status_report)
        self.ui.lineEditSubnet.editingFinished.connect(
            self.update_valid_ip_subnet_patterns)

        # Set up the renderer ComboBox and default to mayaSoftware (index 0)
        setup_combo_box(self.ui.comboBox_renderer, AVAILABLE_RENDERER, 0)

        # Setup Maya version ComboBox and set default to highest version found
        available_maya_versions = get_available_versions()
        self.update_status(
            _('Installierte Maya Versionen: {}').format(
                available_maya_versions))
        setup_combo_box(self.ui.comboBox_version, available_maya_versions)

        # Setup socket server to receive status updates
        self.server = run_message_server(
            (self.update_status, self.led_socket_recv_start,
             self.led_socket_recv_end))
        self.layer_creation_thread = None

        # Create default job
        self.empty_job = Job(_('Kein Job'), '', get_user_directory(),
                             self.ui.comboBox_renderer.currentText())
        self.current_job = self.empty_job

        # Setup socket send
        self.socket_send = SendMessage()
        self.socket_send.send_started.connect(self.led_socket_send_start)
        self.socket_send.send_ended.connect(self.led_socket_send_end)
        self.socket_send.send_error.connect(self.led_socket_send_error)

        # GUI Feedback we are alive and running
        self.alive_job_timer.timeout.connect(self.alive_job_blink)

        self.init_network_render_service()
Example #7
async def create_job(data: Struct):

    if Job(name=data.name).exists():
        raise HTTPException(status_code=422, detail="job already exists")

    for id in data.host_ids:
        if not Host(id):
            raise HTTPException(status_code=422, detail="host does not exist")

    for id in data.task_ids:
        if not Task(id):
            raise HTTPException(status_code=422, detail="task does not exist")

    if not Target(data.target_id):
        raise HTTPException(status_code=422, detail="target does not exist")

    j = Job(data=data)

    if j.getID():
        return {"state": "true"}
    raise HTTPException(status_code=422, detail="can't create job")
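
The Struct request model is not shown in these examples. A hedged sketch of what create_job appears to validate, assuming a pydantic-style model; the field names are taken from the attribute accesses above:

from typing import List
from pydantic import BaseModel

class Struct(BaseModel):
    name: str              # checked for an existing job of the same name
    host_ids: List[str]    # each id checked against Host(id)
    task_ids: List[str]    # each id checked against Task(id)
    target_id: str         # checked against Target(...)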
Example #8
    def add_job(self, job_data, client: str = None):
        if isinstance(job_data, str):
            # Remove trailing semicolon
            if job_data.endswith(';'):
                job_data = job_data[:-1]
            # Convert to tuple
            job_data = tuple(job_data.split(';'))

        if len(job_data) <= 2:
            return False

        try:
            job_item = Job(*job_data)
        except Exception as e:
            LOGGER.error('Error creating job from socket stream: %s %s',
                         job_data, e)
            return False

        if not job_item.file or not job_item.render_dir:
            return False

        if client:
            job_item.client = client

        if not os.path.exists(job_item.file):
            return False
        if not os.path.exists(job_item.render_dir):
            return False

        self.invalidate_transfer_cache()

        job_item.remote_index = len(self.job_queue)
        self.job_queue.append(job_item)
        self.job_widget_signal.emit(job_item)
        self.start_job_file_transfer(job_item)

        return True
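
A hedged illustration of the semicolon-delimited string add_job accepts over the socket; the field order (name, scene file, render directory, renderer) is inferred from the Job(...) calls in the other examples and is an assumption:

raw = "MyJob;/projects/scene.mb;/render/out;mayaSoftware;"
raw = raw[:-1] if raw.endswith(';') else raw   # add_job strips one trailing ';'
job_data = tuple(raw.split(';'))
assert len(job_data) > 2   # shorter tuples are rejected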
Example #9
    def runJob(self):
        job = Job().request()

        if not job:
            return False

        log.write("found job: " + job.getID() + " " + job.getName(), "notice")

        ret = job.apply()
        if not ret:
            log.write("error apply as worker " + job.getWorker(), "notice")
        else:
            log.write("apply as worker " + job.getWorker(), "notice")
            return self.execJob(job)
Example #10
    def update_job_data(self, data):
        """ Receives pickled job data """
        if data is None:
            self.update_job_manager_timer.stop()
            self.update_job_alive_timer.stop()
        elif b'Queue-Finished' in data:
            self.ovr.display(
                '<b>Render Service Warteschlange fertiggestellt.</b><br>'
                '<i>Automatische Job Manager Updates wurden ausgeschaltet.</i>',
                12000)
            self.update_job_manager_timer.stop()
            self.update_job_alive_timer.stop()
            # Remove Queue finished data
            data = data[:data.find(b'Queue-Finished')]

        try:
            load_dict = json.loads(data)
        except Exception as e:
            LOGGER.error(e)
            return

        self.job_queue = list()

        for d in load_dict.items():
            idx, job_dict = d
            # Create empty job instance
            job = Job('', '', '', '')
            # Update instance from loaded dict
            job.__dict__.update(job_dict)
            # Store in job queue
            self.job_queue.append(job)

        if not self.job_queue:
            return

        # Clear Job Manager widget and re-construct from received data
        self.ui.widgetJobManager.clear()

        for job in self.job_queue:
            update_job_manager_widget(job, self.ui.widgetJobManager)

        if self.first_update:
            self.manager_sort_header()
            self.first_update = False
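
A hedged illustration of the payload update_job_data consumes: a JSON object mapping queue indices to per-job attribute dicts, optionally followed by the literal Queue-Finished marker. The attribute names here are assumptions inferred from the Job(...) calls in the other examples:

payload = (b'{"0": {"title": "MyJob", "file": "/projects/scene.mb",'
           b' "render_dir": "/render/out", "renderer": "mayaSoftware"}}'
           b'Queue-Finished')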
Example #11
async def request_job():
    jobs = Job().request()
    if not jobs:
        raise HTTPException(status_code=404, detail="no jobs available")
    return parseJson(jobs)
Example #12
async def get_all_jobs():
    jobs = Job().getAll()
    if not jobs:
        raise HTTPException(status_code=404, detail="no jobs found")
    return parseJson(jobs)
Example #13
async def delete_job(id: str):
    ret = Job(id).delete()
    if not ret:
        raise HTTPException(status_code=404, detail="job not found")
    return True
Example #14
	def getJob(self):
		if not hasattr(self, "job"):
			# Lazily wrap the stored id; job_id matches the target_id/host_id pattern
			self.job = Job(self.job_id)
		return self.job
Example #15
class Backup:
	col_name = "backup"
	
	def __init__(self, id=None, job=None, source=None, target=None):
		self.filename = ""
		
		log.clearBuffer()
		
		if job is not None:
			self.setJob(job)
		if source is not None:
			self.setSource(source)
		if target is not None:
			self.setTarget(target)
		
		self.start_time = self.getTimestamp()

		if id is not None:
			self.get(id)

	def getID(self):
		if hasattr(self, "id"):
			return self.id
		else:
			return False

	def getAll(self, filter={}, type="object"):
		log.write("get all backups", "debug")
		docs = self.getDB().getCollection(query=filter)
		
		if(type == "JSON"):
			return docs
		else:
			ret = []
			for d in docs:
				r = Backup(str(d["_id"]))
				ret.append(r)
			return ret

	def getHost(self):
		if not hasattr(self, "host"):
			self.host = Host(self.host_id)
		return self.host

	def get(self, id):
		log.write("load backup by id: " + id, "debug")
		ret = self.getDB().get(id)
		
		if ret:
			for k, v in ret.items():
				setattr(self, k, v)
			self.id = str(ret["_id"])
			del self._id
			self.exist = True
		else:
			return False

	def getDB(self):
		return DB(self.col_name)

	def toJSON(self):
		return json.dumps(self, default=lambda o: o.__dict__,
			sort_keys=True, indent=4)

	def setJob(self, job):
		self.job = job

	def getJob(self):
		if not hasattr(self, "job"):
			self.job = Job(self.job_id)
		return self.job

	def setTarget(self, target):
		self.target = target

	def getTarget(self):
		if not hasattr(self, "target"):
			self.target = Target(self.target_id)
		return self.target
	
	def setSource(self, source):
		self.source = source

	def getSource(self):
		if not hasattr(self, "source"):
			self.source = Host(self.host_id)
		return self.source

	def getTimestamp(self):
		now = datetime.datetime.now()
		ts = now.strftime('%Y-%m-%d_%H-%M-%S')
		return ts

	def getNewBackupPath(self):
		return self.getBackupRoot() + "/" + self.start_time + "-" + self.task.getName() + ".tar.gz"

	def getContainerBackupPath(self, name):
		return self.getBackupRoot() + "/" + self.start_time + "-" + name + ".tar.gz"

	def getStackBackupPath(self, stack, name):
		return self.getBackupRootShort() + "/" + stack + "/" + self.start_time + "-" + name + ".tar.gz"

	def run(self, task):
		self.task = task
	
		log.write("run task: " + str(self.task.getID()))
		self.prepare(task)

		if self.task.getType() == "file":
			log.write("init file backup", "notice")
			self.filename = self.getNewBackupPath()
			
			self.backupFile()
			
			return True

		elif self.task.getType() == "docker":
			log.write("init docker backup", "notice")
			c_ids = []
			
			if self.task.getData("container"):
				for container in self.task.getData("container"):
					log.write("fetch ids for container name: " + container, "debug")
					c_ids = self.source.getContainersByName(container)		
					if len(c_ids) == 0:
						log.write("no matching containers found", "debug")
					else:
						for id in c_ids:
							self.filename = self.getContainerBackupPath(container)
							self.backupDockerContainer(id, container)

			if self.task.getData("stacks"):
				for stack in self.task.getData("stacks"):			
					log.write("fetch containers for stack name: " + stack, "debug")
					c_names = self.source.getContainersByStack(stack)
					if len(c_names) == 0:
						log.write("no matching containers found", "debug")
					else:
						for container in c_names:
							self.filename = self.getStackBackupPath(stack, container)
							for id in self.source.getContainersByName(container):
								self.backupDockerContainer(id, container, stack)

			self.purgeOld()
			log.write("finish docker backup", "debug")

			# self.target.openFile(filename)
		else:
			log.write("error unsupported task type: " + task["type"])

	def backupFile(self):
		if not self.target.fileExists(self.getBackupRoot()):
			log.write("backup root does not exist. creating: " + self.getBackupRoot())
			self.target.createDirectoryRecursive(self.getBackupRoot())
			
		self.target.openFile(self.filename)
		self.source.createArchiveFromPaths(self.task.getData())
						
		while True:
			data = self.source.readBinary()
			if not data:
				break
			self.target.writeFile(data)
			
		log.write("finish backup")

		self.addBackupEntry()
		self.target.closeFile()


	def backupDockerContainer(self, id, container, stack=None):
		log.write("start backup of container %s to %s" % (id, self.filename))

		if not self.target.fileExists(os.path.dirname(self.getFilename())):
			log.write("backup root does not exist. creating: " + os.path.dirname(self.getFilename()))
			self.target.createDirectoryRecursive(os.path.dirname(self.getFilename()))

		self.target.openFile(self.getFilename())
		self.source.createArchiveFromContainerId(id)
					
		while True:
			data = self.source.readBinary()
			if not data:
				break
			self.target.writeFile(data)
		
		self.target.closeFile()

		log.write("finish backup of container %s" % (id))
		
		logs = {"container": container}
		if stack is not None:
			logs["stack"] = stack
		
		self.addBackupEntry(logs)
		return True		

	def exists(self):
		if hasattr(self, "exist"):
			return self.exist
		else:
			return False

	def getFilename(self):
		if hasattr(self, "filename"):
			return self.filename
		return False
	
	def getSnapshots(self):
		log.write("get snapshots")
		
		filter = {
			"task_id": self.getTask().getID(),
			"host_id": self.getHost().conf.getID(),
			"target_id": self.getTarget().conf.getID()
		}
		
		ret = self.getDB().find(filter=filter).sort("start_time", 1)
		
		log.write("found %i snapshots" % (ret.count()))
		
		return ret

	def deleteOldestSnapshot(self):
		id = self.getSnapshots()[0]["_id"]
		return Backup(id).delete()

	def purgeOld(self):
		log.write("purge old backups", "info")
		while self.getSnapshots().count() > self.getJob().getMaxSnapshotCount():
			self.deleteOldestSnapshot()

		return True

	def addBackupEntry(self, data={}):
		log.write("mark backup as completed", "debug")

		self.end_time = self.getTimestamp()

		doc = {"job_id": str(self.job.getID()),
				"filename": self.filename,
				"task_id": self.task.getID(),
				"host_id": self.source.conf.getID(),
				"target_id": self.target.conf.getID(),
				"type": self.task.getType(),
				"hostname": self.source.conf.getHostname(),
				"log": log.getBuffer(),
				"start_time": self.start_time,
				"end_time": self.end_time}
		# Merge any extra metadata (e.g. container/stack names) into the entry
		doc.update(data)

		self.getDB().addDoc(doc)

	def restore(self, overwrite=True, replace=False):
		if self.task.getType() == "file":
			log.write("restore backup " + self.getID(), "notice")
			self.getTarget().prepare()
			self.getTarget().getConnection().openFile(self.getFilename(), "rb")
	
			self.getSource().prepare()
			self.getSource().getConnection().restoreArchive()
	
			while True:
				data = self.getTarget().getConnection().readBinary()
				if not data:
					break
				self.getSource().getConnection().writeBinary(data)
			self.getSource().getConnection().closeFile()
			self.getSource().getConnection().syncDirectory()
			
			log.write("restored backup " + self.getID())
		else:
			log.write("error: restore not supported for this type", "error")
			return False

	def getBackupRootShort(self):
		return "/backup/" + str(self.job.getID()) + "/" + self.task.getID() + "/"
	
	def getBackupRootStack(self, stack):
		return "/backup/" + str(self.job.getID()) + "/" + self.task.getID() + "/" + stack + "/"
	
	def getBackupRoot(self):
		return "/backup/" + str(self.job.getID()) + "/" + self.task.getID() + "/" + self.source.conf.getHostname() + "/"

	def getTask(self):
		if hasattr(self, "task"):
			return self.task
		return False

	def prepare(self, task):
		self.task = task

	def delete(self):
		if not self.exists():
			return True
		log.write("delete backup " + str(self.getID()), "debug")
		
		self.getTarget().prepare()
		self.getTarget().getConnection().deleteFile(self.getFilename())
		return self.getDB().deleteById(self.getID())
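
A hedged usage sketch for the Backup class above; Job, Host, Target and the task object are project-specific, and the ids are placeholders:

job = Job("<job-id>")
backup = Backup(job=job, source=Host("<host-id>"), target=Target("<target-id>"))
backup.run(task)       # task is an assumed Task-like object ("file" or "docker")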
Example #16
class Dump:
    col_name = "dump"

    cmd = {
        "mongodb": {
            "dump": "mongodump -o /dumps",
            "cred_format": "--uri='%s'",
            "db_format": "--db="
        },
        "mysql": {
            "dump": "mysqldump --comments --routines ",
            "dump_all":
            'mkdir /dumps; cd /dumps; mysql -N -e "show databases" | grep -vE "^(mysql|performance_schema|information_schema)$" | while read dbname; do mysqldump --complete-insert --routines --triggers --single-transaction "$dbname" | gzip > "$dbname".sql.gz; done',
            "db_format": "%s"
        }
    }

    def __init__(self, id=None, job=None, source=None, target=None):
        self.filename = ""

        log.clearBuffer()

        if job is not None:
            self.setJob(job)
        if source is not None:
            self.setSource(source)
        if target is not None:
            self.setTarget(target)

        self.start_time = self.getTimestamp()

        if id is not None:
            self.get(id)

    def get(self, id):
        log.write("load dump by id: " + id, "debug")
        ret = self.getDB().get(id)

        if ret:
            for k, v in ret.items():
                setattr(self, k, v)
            self.id = str(ret["_id"])
            del self._id
            self.exist = True
        else:
            return False

    def getAll(self, filter={}, type="object"):
        log.write("get all dumps", "debug")
        docs = self.getDB().getCollection(query=filter)

        if (type == "JSON"):
            return docs
        else:
            ret = []
            for d in docs:
                r = Dump(str(d["_id"]))
                ret.append(r)
            return ret

    def getCmd(self):
        type = self.task.getType()
        if type:
            return self.cmd[type]
        return False

    def getTimestamp(self):
        now = datetime.datetime.now()
        ts = now.strftime('%Y-%m-%d_%H-%M-%S')
        return ts

    def getDB(self):
        return DB(self.col_name)

    def exists(self):
        if hasattr(self, "exist"):
            return self.exist
        else:
            return False

    def getFilename(self):
        if hasattr(self, "filename"):
            return self.filename
        return False

    def getSnapshots(self):
        log.write("get snapshots")

        filter = {
            "task_id": self.getTask().getID(),
            "host_id": self.getHost().getID(),
            "target_id": self.getTarget().getID()
        }

        ret = self.getDB().find(filter=filter).sort("start_time", 1)

        log.write("found %i snapshots" % (ret.count()))

        return ret

    def deleteOldestSnapshot(self):
        id = self.getSnapshots()[0]["_id"]
        return Dump(id).delete()

    def purgeOld(self):
        log.write("purge old dumps", "info")
        while self.getSnapshots().count() > self.getJob().getMaxSnapshotCount():
            self.deleteOldestSnapshot()

        return True

    def toJSON(self):
        return json.dumps(self,
                          default=lambda o: o.__dict__,
                          sort_keys=True,
                          indent=4)

    def getID(self):
        if hasattr(self, "id"):
            return self.id
        else:
            return False

    def setJob(self, job):
        self.job = job

    def getJob(self):
        if not hasattr(self, "job"):
            self.job = Job(self.job_id)
        return self.job

    def setTarget(self, target):
        self.target = target

    def getTarget(self):
        if not hasattr(self, "target"):
            self.target = Target(self.target_id)
        return self.target

    def setSource(self, source):
        self.source = source

    def getSource(self):
        if not hasattr(self, "source"):
            self.source = Host(self.host_id)
        return self.source

    def getHost(self):
        if not hasattr(self, "host"):
            self.host = Host(self.host_id)
        return self.host

    def getType(self):
        if hasattr(self, "type"):
            return self.type
        return False

    def getTask(self):
        if hasattr(self, "task"):
            return self.task
        return False

    def prepare(self, task):
        self.task = task

    def delete(self):
        if not self.exists():
            return True
        log.write("delete dump " + str(self.getID()), "debug")

        self.getTarget().prepare()
        self.getTarget().getConnection().deleteFile(self.getFilename())
        return self.getDB().deleteById(self.getID())

    def run(self, task):
        self.task = task

        log.write("run task: " + str(self.task.getID()))
        self.prepare(task)

        if self.task.getData("container"):
            for container in self.task.getData("container"):
                self.c = self.task.getData("container")[container]
                log.write("fetch ids for container name: " + container,
                          "debug")
                c_ids = self.source.getContainersByName(container)
                if len(c_ids) == 0:
                    log.write("no matching containers found", "debug")
                else:
                    for c_id in c_ids:
                        self.container_id = c_id
                        if "db" in self.c:
                            if isinstance(self.c["db"], str):
                                self.c["db"] = [self.c["db"]]

                            for db in self.c["db"]:
                                self.container = container
                                self.filename = self.getDumpFilename(
                                    db=db, container=self.container)
                                self.backupDB(db=db, container_id=c_id)

                        else:
                            self.container = container
                            self.filename = self.getDumpFilename(
                                container=self.container)
                            self.backupDB(container_id=c_id)

        if self.task.getData("stacks"):
            for stack in self.task.getData("stacks"):
                self.c = self.task.getData("stacks")[stack]
                log.write("fetch containers for stack name: " + stack, "debug")
                c_ids = self.source.getContainersByStack(stack)
                if len(c_ids) == 0:
                    log.write("no matching containers found", "debug")
                else:
                    for c_id in c_ids:
                        if "db" in self.c:
                            if isinstance(self.c["db"], str):
                                self.c["db"] = [self.c["db"]]

                            for db in self.c["db"]:
                                self.filename = self.getDumpFilename(
                                    db=db, stack=stack)
                                self.backupDB(db=db, container_id=c_id)

                        else:
                            self.filename = self.getDumpFilename(stack=stack)
                            self.backupDB(container_id=c_id)

    def getDumpFilename(self, db=None, container=None, stack=None):
        base = "/dumps/" + str(
            self.job.getID()) + "/" + self.task.getID() + "/"

        if stack is not None:
            base += stack + "/"
        if container is not None:
            base += container + "/"

        if db is not None:
            filename = self.getTimestamp() + "_" + db
        else:
            filename = self.getTimestamp() + "_all_databases"

        if self.task.getType() in ("mongodb", "mysql"):
            filename += ".tar"

        return base + filename

    def backupDB(self, db=None, container_id=None):
        state = False
        backup_root = os.path.dirname(self.getFilename())
        if not self.target.fileExists(backup_root):
            log.write("dump root does not exist. creating: " + backup_root)
            self.target.createDirectoryRecursive(backup_root)

        cmd = self.getCmd()["dump"]

        if "port" in self.c:
            cmd += " --port " + str(self.c["port"])

        if db is not None:
            cmd += " " + self.getCmd()["db_format"] + db
        elif "dump_all" in self.getCmd():
            cmd = self.getCmd()["dump_all"]

        if "gzip" in self.c:
            cmd += " | gzip"

        self.target.openFile(self.filename)

        if container_id is not None:
            self.source.execCommandDocker(container_id, cmd, wait=True)
        else:
            self.source.execCommand(cmd, wait=True)

        if self.task.getType() in ["mongodb", "mysql"]:
            # Drain any pending command output before streaming the archive
            data = self.source.read()

            if container_id is not None:
                self.source.execCommandDocker(
                    container_id, "tar -Oc /dumps 2>/dev/null | cat")
            else:
                self.source.execCommand("tar -Oc /dumps 2>/dev/null | cat")

        while True:
            data = self.source.readBinary()
            if not data:
                break
            state = True  # only mark success once real data has arrived
            self.target.writeFile(data)

        if self.task.getType() in ["mongodb", "mysql"]:
            if container_id is not None:
                self.source.execCommandDocker(container_id, "rm -rf /dumps")
            else:
                self.source.execCommand("rm -rf /dumps")
        self.target.closeFile()

        if not state:
            log.write("error: no data received", "error")
            self.getTarget().getConnection().deleteFile(self.getFilename())
        else:
            self.addDumpEntry()
            log.write("finish dump")

    def addDumpEntry(self, data={}):
        log.write("mark dump as completed", "debug")

        self.end_time = self.getTimestamp()

        doc = {
            "job_id": str(self.job.getID()),
            "filename": self.filename,
            "task_id": self.task.getID(),
            "host_id": self.source.conf.getID(),
            "target_id": self.target.conf.getID(),
            "type": self.task.getType(),
            "hostname": self.source.conf.getHostname(),
            "log": log.getBuffer(),
            "start_time": self.start_time,
            "end_time": self.end_time
        }
        # Merge any extra metadata into the entry
        doc.update(data)

        self.getDB().addDoc(doc)
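
A hedged usage sketch mirroring the Backup example; again the constructors and the task object are project-specific and the ids are placeholders:

job = Job("<job-id>")
dump = Dump(job=job, source=Host("<host-id>"), target=Target("<target-id>"))
dump.run(task)         # task is an assumed Task-like object ("mysql" or "mongodb")
dump.purgeOld()        # trim snapshots beyond the job's configured maximum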