示例#1
0
class MainWindow(QtGui.QMainWindow, GuiBase, Ui_OAOffline):

    """ Main window class. """

    ### Logger signal used to forward log messages to the GUI
    ### (presumably so widget updates happen on the GUI thread — TODO confirm)
    log_signal = QtCore.pyqtSignal(unicode)

    def __init__(self, parent=None):
        """ Constructor.

        :param parent: optional parent widget, forwarded to QMainWindow.
        """
        # Parent constructors
        QtGui.QMainWindow.__init__(self, parent)
        GuiBase.__init__(self, "OAOffline")

        # Setup UI
        self.setupUi(self)

        # Setup widget logger: records are emitted through log_signal and
        # handled by self.update_log
        self.logger.setupWidgetLogger(self.log_signal)
        self.log_signal.connect(self.update_log)

        # Create LoggerServer for the OA. Best effort: on failure we log the
        # error and keep logserver = None so the GUI still starts.
        # NOTE: 'except Exception as e' (not the legacy 'except Exception, e')
        # for consistency with the rest of the file and Py3 compatibility.
        try:
            self.logserver = LoggerServer("OAOfflineSrv", self.logger.level())
            self.logserver.start()
        except Exception as e:
            self.logger.error("[%s] Cannot init LogServer (Error: %s)", inspect.stack()[0][3], e)
            self.logserver = None

        # Start timer (fires every 500 ms)
        self.timer = self.startTimer(500)

        # Plot dialog list
        self.dialogs = []
示例#2
0
    def __init__(self):
        """ Constructor. Sets up worker-pool parameters, queues and logging. """
        # Parent init
        threading.Thread.__init__(self)
        # When set, the server loop recycles all workers on its next pass
        self.reload = False

        # Job handling stuff
        self.runningflag = True
        self.num_processes = 2  # Parallel
        self.maxjobs = 6

        # Init logging server. Optional: on failure keep running without it.
        # Catch Exception (not a bare 'except:') so SystemExit and
        # KeyboardInterrupt are not swallowed.
        try:
            self.log_server = LoggerServer("WorkSpawner", LOG_LEVEL, LOG_FILE)
        except Exception:
            self.log_server = None

        # Default values when there's only a name
        self.defaultjobmetainfo = ("", "WSNoOp.process", "")
        self.defaultpostmetainfo = ("", "", "")

        # Job queue from outside
        self.tangoqueue = Queue.Queue()

        # State flag
        self._state = WorkSpawnerServer.STANDBY

        # Setup logging
        self.logger = Logger("WorkSpawnerMaster", LOG_LEVEL, LOG_HOST)
示例#3
0
    def __init__(self, parent=None):
        """ Constructor.

        :param parent: optional parent widget, forwarded to QMainWindow.
        """
        # Parent constructors
        QtGui.QMainWindow.__init__(self, parent)
        GuiBase.__init__(self, "OAOffline")

        # Setup UI
        self.setupUi(self)

        # Setup widget logger: records are emitted through log_signal and
        # handled by self.update_log
        self.logger.setupWidgetLogger(self.log_signal)
        self.log_signal.connect(self.update_log)

        # Create LoggerServer for the OA. Best effort: on failure we log the
        # error and keep logserver = None so the GUI still starts.
        # NOTE: 'except Exception as e' (not the legacy 'except Exception, e')
        # for consistency with the rest of the file and Py3 compatibility.
        try:
            self.logserver = LoggerServer("OAOfflineSrv", self.logger.level())
            self.logserver.start()
        except Exception as e:
            self.logger.error("[%s] Cannot init LogServer (Error: %s)", inspect.stack()[0][3], e)
            self.logserver = None
示例#4
0
class WorkSpawnerServer(threading.Thread):

    """ Job-spawner server thread.

    Reads job tuples from an input queue (self.tangoqueue), dispatches them
    to a pool of worker processes and optionally forwards successful results
    to a single post-processing worker.
    """

    # Status constants
    ON = 0
    OFF = 1
    STANDBY = 2
    RUNNING = 3
    ERROR = 4
    WARN = 5
    NONE = 6

    def __init__(self):
        """ Constructor. Sets up worker-pool parameters, queues and logging. """
        # Parent init
        threading.Thread.__init__(self)
        # When set, the server loop recycles all workers on its next pass
        self.reload = False

        # Job handling stuff
        self.runningflag = True
        self.num_processes = 2  # Parallel
        self.maxjobs = 6

        # Init logging server. Optional: on failure keep running without it.
        # Catch Exception (not a bare 'except:') so SystemExit and
        # KeyboardInterrupt are not swallowed.
        try:
            self.log_server = LoggerServer("WorkSpawner", LOG_LEVEL, LOG_FILE)
        except Exception:
            self.log_server = None

        # Default values when there's only a name
        self.defaultjobmetainfo = ("", "WSNoOp.process", "")
        self.defaultpostmetainfo = ("", "", "")

        # Job queue from outside
        self.tangoqueue = Queue.Queue()

        # State flag
        self._state = WorkSpawnerServer.STANDBY

        # Setup logging
        self.logger = Logger("WorkSpawnerMaster", LOG_LEVEL, LOG_HOST)

    def start_worker(self, job_queue, result_queue=None):
        """ start_worker(job_queue, result_queue=None)
        Create, start and return a new Worker bound to the given queues.
        """
        worker = Worker(job_queue, result_queue)
        worker.start()
        return worker

    def getErrorState(self):
        """ Return the error state of the workspawner.

        Maps the log server status to ERROR/WARN; returns NONE when there is
        no log server or no error condition.
        """
        if self.log_server is not None:
            log_state = self.log_server.getStatus()
            if log_state == Logger.ERROR:
                return WorkSpawnerServer.ERROR
            elif log_state == Logger.WARN:
                return WorkSpawnerServer.WARN
        return WorkSpawnerServer.NONE

    def getProcessingState(self):
        """ Return the processing state of the workspawner (STANDBY/RUNNING). """
        # pending_jobs / pending_post are only created by multProcessSrv();
        # use empty defaults so this is safe to call before the server loop
        # has started.
        pending_jobs = getattr(self, "pending_jobs", [])
        pending_post = getattr(self, "pending_post", [])
        if self.tangoqueue.empty() and len(pending_jobs) == 0 and len(pending_post) == 0:
            return WorkSpawnerServer.STANDBY
        else:
            return WorkSpawnerServer.RUNNING

    def multProcessSrv(self):
        """ Main processing loop.

        Spawns the worker pool, feeds it jobs from self.tangoqueue, keeps the
        pool at the configured size, manages the optional post-processing
        worker and collects results, until self.runningflag is cleared or a
        termination signal is seen. Returns -1 if the pool cannot be started.
        """

        # Workers stuff
        job_id = 0
        worker_list = []
        self.job_queue = multiprocessing.Queue()
        self.result_queue = multiprocessing.Queue()
        self.pending_jobs = []

        # Post processing stuff
        post_worker = None
        self.post_jobs = multiprocessing.Queue()
        self.post_results = multiprocessing.Queue()
        self.pending_post = []

        # Start worker processes
        try:
            for i in range(0, self.num_processes):
                worker_list.append(self.start_worker(self.job_queue, self.result_queue))
            if all(self.defaultpostmetainfo[0:2]):
                post_worker = self.start_worker(self.post_jobs, self.post_results)
        except Exception as e:
            self.logger.error("[multProcessSrv] Error starting worker pool (Error: %s)", e)
            return -1

        sighandler.term_interrupt = False
        while self.runningflag and not sighandler.term_interrupt:

            # Check reload signal
            if self.reload:
                # To reload all the workers just send them an empty job
                for i in range(len(worker_list)):
                    self.job_queue.put((None,))
                if post_worker:
                    self.post_jobs.put((None,))
                # Reset log server
                if self.log_server is not None:
                    self.log_server.resetError()
                # Reset reload flag
                self.reload = False

            # Fill the job queue
            while len(self.pending_jobs) < self.maxjobs:
                try:
                    jobinfo = self.tangoqueue.get(timeout=0.2)
                except Queue.Empty:
                    break

                except Exception as e:
                    self.logger.error("[multProcessSrv] Exception reading input job queue (Error: %s)", e)
                    # FIXME: this error should be handled better...
                    break

                # Complete job tuple (filename, module, function, parameters)
                # using the defaults for whatever fields are missing
                if len(jobinfo) == 1:
                    jobinfo = jobinfo + self.defaultjobmetainfo
                elif len(jobinfo) == 2:
                    jobinfo = jobinfo + self.defaultjobmetainfo[1:]
                elif len(jobinfo) == 3:
                    jobinfo = jobinfo + self.defaultjobmetainfo[2:]

                self.logger.debug("[multProcessSrv] Job meta info: %s", jobinfo)

                self.job_queue.put((job_id,) + jobinfo)
                self.pending_jobs.append(job_id)
                job_id += 1

            # Check that all workers are still alive. Iterate over a copy of
            # the list so that removing dead workers does not skip elements.
            for worker in list(worker_list):
                if not worker.is_alive():
                    try:
                        worker.join()
                        self.logger.info("[multProcessSrv] worker '%s' terminated.", worker.name)
                        worker_list.remove(worker)
                    except Exception as e:
                        self.logger.error("[multProcessSrv] Error joining a dead worker process (Error: %s)", e)

            # If the number of running workers is more that what is configured we
            # kill a suitable number of processes appending a corresponding
            # number of (None, ) jobs. The first processes reading the null
            # job will be killed
            if self.num_processes < len(worker_list):
                self.job_queue.put((None,))

            # If the number of running workers is less that what is configured
            # we start a suitable number of workers to meet the requirement
            if self.num_processes > len(worker_list):
                try:
                    worker_list.append(self.start_worker(self.job_queue, self.result_queue))
                    self.logger.info("[multProcessSrv] respawned a worker thread.")
                except Exception as e:
                    self.logger.error("[multProcessSrv] Error respawning a worker process (Error: %s)", e)

            # Check if the post-processing worker is needed and if it's
            # running. Terminate it in case is no more configured
            if all(self.defaultpostmetainfo[0:2]):
                if not post_worker:
                    try:
                        post_worker = self.start_worker(self.post_jobs, self.post_results)
                        self.logger.info("[multProcessSrv] started post-processing thread.")
                    except Exception as e:
                        self.logger.error("[multProcessSrv] Error starting post-processing process (Error: %s)", e)

                elif not post_worker.is_alive():
                    try:
                        post_worker.join()
                        post_worker = self.start_worker(self.post_jobs, self.post_results)
                        self.logger.info("[multProcessSrv] respawned post-processing thread.")
                    except Exception as e:
                        self.logger.error("[multProcessSrv] Error respawning post-processing process (Error: %s)", e)

            # Clean up the worker process if for some reason the
            # post-processing got disabled
            elif post_worker:
                self.post_jobs.put((None,))
                try:
                    post_worker.join(timeout=0.2)
                except Exception:
                    pass
                else:
                    # Drain leftover post-processing jobs without blocking:
                    # a plain get() never raises Queue.Empty and would hang
                    # forever on an empty queue, so use get_nowait().
                    try:
                        while True:
                            self.post_jobs.get_nowait()
                    except Queue.Empty:
                        pass
                    self.logger.info("[multProcessSrv] terminated post-processing thread")
                    post_worker = None

            # Collect results from processing workers
            while True:
                try:
                    result = self.result_queue.get(timeout=0.2)
                    self.logger.info("[multProcessSrv] Job with ID '%d' returned", result[0])

                    matches = [i for i, jid in enumerate(self.pending_jobs) if jid == result[0]]
                    if len(matches) == 0:
                        self.logger.error("[multProcessSrv] Got result from unexpected job with ID %d", result[0])
                    elif len(matches) > 1:
                        self.logger.error(
                            "[multProcessSrv] Got multiple matches (%d) for job with ID %d", len(matches), result[0]
                        )
                    for m in matches:
                        del self.pending_jobs[m]

                    # Pass return value to post-processing
                    if all(self.defaultpostmetainfo[0:2]) and post_worker:
                        # Submit result to post-processing worker.
                        # NOTE: '!= False' is deliberate — it must also skip
                        # values that compare equal to False (e.g. 0), so it
                        # is not replaced with 'is not False'.
                        if result[1] != False:
                            self.post_jobs.put(
                                (
                                    result[0],
                                    result[1],
                                    self.defaultpostmetainfo[0],
                                    self.defaultpostmetainfo[1],
                                    self.defaultpostmetainfo[2],
                                )
                            )
                            self.pending_post.append(result[0])

                except Queue.Empty:
                    break
                except Exception as e:
                    self.logger.error(
                        "[multProcessSrv] Got exception while getting results for completed jobs (Error: %s)",
                        e,
                        exc_info=True,
                    )

            # Collect result from post-processing worker
            while True:
                try:
                    result = self.post_results.get(timeout=0.1)
                    if result[1] == True:
                        self.logger.info(
                            "[multProcessSrv] Post-processing of job with ID '%d' completed successfully", result[0]
                        )
                    else:
                        self.logger.info(
                            "[multProcessSrv] Post-processing of job with ID '%d' completed with errors", result[0]
                        )

                    matches = [i for i, jid in enumerate(self.pending_post) if jid == result[0]]
                    if len(matches) == 0:
                        self.logger.error(
                            "[multProcessSrv] Got post-processing result from unexpected job with ID %d", result[0]
                        )
                    elif len(matches) > 1:
                        self.logger.error(
                            "[multProcessSrv] Got multiple matches (%d) for post-processing of job with ID %d",
                            len(matches),
                            result[0],
                        )
                    for m in matches:
                        del self.pending_post[m]

                except Queue.Empty:
                    break
                except Exception as e:
                    self.logger.error(
                        "[multProcessSrv] Got exception while getting results for completed post-processing jobs (Error: %s)",
                        e,
                        exc_info=True,
                    )

        # Stop all processing workers: one poison pill per worker, then join
        for _ in worker_list:
            self.job_queue.put((None,))
        for worker in worker_list:
            worker.join()

        # Stop post-processing worker
        if post_worker:
            self.post_jobs.put((None,))
            post_worker.join()

    def run(self):
        """ WorkSpawner server entry point.

        Starts the log server (if any), runs the main processing loop and
        shuts the log server down on exit. Returns 0 on success, -1 on an
        unexpected exception (the return value of Thread.run() is ignored by
        the threading machinery but kept for direct callers).
        """
        try:
            # Start logging server thread
            if self.log_server is not None:
                self.log_server.start()

            # Start multiprocessing
            self.logger.info("[run] Started WS server")
            self.multProcessSrv()

        except Exception as e:
            self.logger.error("[run] Unexpected exception (Error: %s)", e)
            retval = -1
        else:
            retval = 0
        finally:
            # Shutting down logging server. No 'return' inside this 'finally'
            # block: a return here would silently swallow in-flight
            # exceptions such as SystemExit or KeyboardInterrupt.
            if self.log_server is not None:
                self.log_server.server.shutdown()
                self.log_server.join()
        return retval