Example #1
 def __enter__(self):
     self.analysis_id = uuid.uuid4().hex
     self.upq = Queue.Queue()
     logger.info("Server registering")
     self.boundary = Z.register_analysis(self.analysis_id, self.upq)
     logger.info("Server has registered")
     return self
Example #2
 def __enter__(self):
     self.analysis_id = uuid.uuid4().hex
     self.upq = Queue.Queue()
     logger.info("Server registering")
     self.boundary = Z.register_analysis(self.analysis_id,
                                         self.upq)
     logger.info("Server has registered")
     return self
Example #3
    def jobserver(self, analysis_id, start_signal):
        # this server subthread should be very lightweight, as it has to handle
        # all the requests from workers, of which there might be several.

        # start the zmqrequest Boundary
        request_queue = Queue.Queue()
        boundary = register_analysis(analysis_id, 
                                     request_queue)
        #
        # The boundary is announcing our analysis at this point. Workers
        # will get announcements if they connect.
        #
        start_signal.release()

        # XXX - is this just to keep from posting another AnalysisPaused event?
        # If so, probably better to simplify the code and keep sending them
        # (should be only one per second).
        i_was_paused_before = False

        # start serving work until the analysis is done (or changed)
        while not self.cancelled:

            with self.jobserver_work_cv:
                if self.paused and not i_was_paused_before:
                    self.post_event(AnalysisPaused())
                    i_was_paused_before = True
                if self.paused or request_queue.empty():
                    self.jobserver_work_cv.wait(1)  # we timeout in order to keep announcing ourselves.
                    continue  # back to while... check that we're still running

            if i_was_paused_before:
                self.post_event(AnalysisResumed())
                i_was_paused_before = False

            try:
                req = request_queue.get(timeout=0.25)
            except Queue.Empty:
                continue
            
            if isinstance(req, PipelinePreferencesRequest):
                logger.debug("Received pipeline preferences request")
                req.reply(Reply(pipeline_blob=np.array(self.pipeline_as_string()),
                                preferences=cpprefs.preferences_as_dict()))
                logger.debug("Replied to pipeline preferences request")
            elif isinstance(req, InitialMeasurementsRequest):
                logger.debug("Received initial measurements request")
                req.reply(Reply(buf=self.initial_measurements_buf))
                logger.debug("Replied to initial measurements request")
            elif isinstance(req, WorkRequest):
                if not self.work_queue.empty():
                    logger.debug("Received work request")
                    job, worker_runs_post_group, wants_dictionary = \
                        self.work_queue.get()
                    req.reply(WorkReply(
                        image_set_numbers=job,
                        worker_runs_post_group=worker_runs_post_group,
                        wants_dictionary=wants_dictionary))
                    self.queue_dispatched_job(job)
                    logger.debug("Dispatched job: image sets=%s" % 
                                 ",".join([str(i) for i in job]))
                else:
                    # there may be no work available currently, but there
                    # may be some later.
                    req.reply(NoWorkReply())
            elif isinstance(req, ImageSetSuccess):
                # interface() is responsible for replying, to allow it to
                # request the shared_state dictionary if needed.
                logger.debug("Received ImageSetSuccess")
                self.queue_imageset_finished(req)
                logger.debug("Enqueued ImageSetSuccess")
            elif isinstance(req, SharedDictionaryRequest):
                logger.debug("Received shared dictionary request")
                req.reply(SharedDictionaryReply(dictionaries=self.shared_dicts))
                logger.debug("Sent shared dictionary reply")
            elif isinstance(req, MeasurementsReport):
                logger.debug("Received measurements report")
                self.queue_received_measurements(req.image_set_numbers,
                                                 req.buf)
                req.reply(Ack())
                logger.debug("Acknowledged measurements report")
            elif isinstance(req, (InteractionRequest, DisplayRequest, 
                                  DisplayPostGroupRequest,
                                  ExceptionReport, DebugWaiting, DebugComplete,
                                  OmeroLoginRequest)):
                logger.debug("Enqueueing interactive request")
                # bump upward
                self.post_event(req)
                logger.debug("Interactive request enqueued")
            else:
                msg = "Unknown request from worker: %s of type %s" % (req, type(req))
                logger.error(msg)
                raise ValueError(msg)

        # stop the ZMQ-boundary thread - will also deal with any requests waiting on replies
        boundary.cancel(analysis_id)
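
The jobserver above runs as a lightweight subthread and calls start_signal.release() once the boundary is announcing the analysis, so the caller can block until workers are able to connect. Below is a minimal sketch of how it might be launched; the threading and semaphore wiring is an assumption for illustration (start_jobserver and runner are hypothetical names), while jobserver, the analysis_id format, and the start_signal handshake come from the examples themselves.

import threading
import uuid

def start_jobserver(runner):
    # Hedged sketch: run runner.jobserver() in a daemon thread and block
    # until it has registered the analysis with the ZMQ boundary.  The
    # Semaphore stands in for the start_signal that jobserver releases.
    analysis_id = uuid.uuid4().hex
    start_signal = threading.Semaphore(0)
    thread = threading.Thread(target=runner.jobserver,
                              args=(analysis_id, start_signal))
    thread.daemon = True
    thread.start()
    start_signal.acquire()  # returns once the boundary is announcing
    return analysis_id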
Example #4
    def jobserver(self, analysis_id, start_signal):
        # this server subthread should be very lightweight, as it has to handle
        # all the requests from workers, of which there might be several.

        # start the zmqrequest Boundary
        request_queue = Queue.Queue()
        boundary = register_analysis(analysis_id, 
                                     request_queue)
        #
        # The boundary is announcing our analysis at this point. Workers
        # will get announcements if they connect.
        #
        start_signal.release()

        # XXX - is this just to keep from posting another AnalysisPaused event?
        # If so, probably better to simplify the code and keep sending them
        # (should be only one per second).
        i_was_paused_before = False

        # start serving work until the analysis is done (or changed)
        while not self.cancelled:

            with self.jobserver_work_cv:
                if self.paused and not i_was_paused_before:
                    self.post_event(AnalysisPaused())
                    i_was_paused_before = True
                if self.paused or request_queue.empty():
                    self.jobserver_work_cv.wait(1)  # we timeout in order to keep announcing ourselves.
                    continue  # back to while... check that we're still running

            if i_was_paused_before:
                self.post_event(AnalysisResumed())
                i_was_paused_before = False

            try:
                req = request_queue.get(timeout=0.25)
            except Queue.Empty:
                continue
            
            if isinstance(req, PipelinePreferencesRequest):
                req.reply(Reply(pipeline_blob=np.array(self.pipeline_as_string()),
                                preferences=cpprefs.preferences_as_dict()))
            elif isinstance(req, InitialMeasurementsRequest):
                req.reply(Reply(buf=self.initial_measurements_buf))
            elif isinstance(req, WorkRequest):
                if not self.work_queue.empty():
                    job, worker_runs_post_group, wants_dictionary = \
                        self.work_queue.get()
                    req.reply(WorkReply(
                        image_set_numbers=job,
                        worker_runs_post_group=worker_runs_post_group,
                        wants_dictionary=wants_dictionary))
                    self.queue_dispatched_job(job)
                else:
                    # there may be no work available currently, but there
                    # may be some later.
                    req.reply(NoWorkReply())
            elif isinstance(req, ImageSetSuccess):
                # interface() is responsible for replying, to allow it to
                # request the shared_state dictionary if needed.
                self.queue_imageset_finished(req)
            elif isinstance(req, SharedDictionaryRequest):
                req.reply(SharedDictionaryReply(dictionaries=self.shared_dicts))
            elif isinstance(req, MeasurementsReport):
                self.queue_received_measurements(req.image_set_numbers,
                                                 req.buf)
                req.reply(Ack())
            elif isinstance(req, (InteractionRequest, DisplayRequest, 
                                  ExceptionReport, DebugWaiting, DebugComplete,
                                  OmeroLoginRequest)):
                # bump upward
                self.post_event(req)
            else:
                raise ValueError("Unknown request from worker: %s of type %s" % (req, type(req)))

        # stop the ZMQ-boundary thread - will also deal with any requests waiting on replies
        boundary.cancel(analysis_id)
Example #5
 def __enter__(self):
     self.analysis_id = uuid.uuid4().hex
     self.upq = Queue.Queue()
     self.boundary = Z.register_analysis(self.analysis_id,
                                         self.upq)
     return self
Example #6
 def __enter__(self):
     self.analysis_id = uuid.uuid4().hex
     self.upq = Queue.Queue()
     self.boundary = Z.register_analysis(self.analysis_id, self.upq)
     return self
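
Each __enter__ above registers the analysis with the ZMQ boundary and returns self, which implies a context manager, but none of these examples show the matching __exit__. A minimal sketch, assuming the boundary returned by Z.register_analysis supports the same cancel(analysis_id) call used at the end of Examples #3 and #4:

 def __exit__(self, exc_type, exc_value, traceback):
     # Hedged sketch: undo the registration made in __enter__.  Assumes the
     # boundary object exposes cancel(analysis_id), as in Examples #3 and #4.
     self.boundary.cancel(self.analysis_id)
     return False  # never suppress exceptions raised inside the with-block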