def test_01_05_unicode_value(self):
    # If the item is already in unicode, don't re-decode
    gotcha = u"c:\\users\\default"
    cpprefs.set_preferences_from_dict({})  # clear cache
    cpprefs.get_config().Write("test_preferences", gotcha)
    result = cpprefs.config_read("test_preferences")
    self.assertEqual(result, gotcha)
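# A minimal sketch (not part of cpprefs) of the gotcha the test above
# covers: under Python 2, calling .decode() on a value that is already
# unicode first re-encodes it with the ASCII codec, which fails or
# corrupts non-ASCII paths. A reader like config_read should decode only
# raw byte strings; the helper name below is hypothetical.
def _ensure_unicode(value, encoding="utf-8"):
    if isinstance(value, str):
        # Python 2 byte string: decode it exactly once
        return value.decode(encoding)
    # already unicode: return unchanged rather than re-decoding
    return value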
def do_job(self, job):
    '''Handle a work request to its completion

    job - WorkRequest
    '''
    import cellprofiler.pipeline as cpp
    job_measurements = []
    try:
        send_dictionary = job.wants_dictionary
        logger.info("Starting job")
        # Fetch the pipeline and preferences for this analysis if we don't have it
        current_pipeline, current_preferences = \
            self.pipelines_and_preferences.get(
                self.current_analysis_id, (None, None))
        if not current_pipeline:
            logger.debug("Fetching pipeline and preferences")
            rep = self.send(PipelinePreferencesRequest(
                self.current_analysis_id))
            logger.debug("Received pipeline and preferences response")
            preferences_dict = rep.preferences
            # update preferences to match remote values
            cpprefs.set_preferences_from_dict(preferences_dict)
            logger.debug("Loading pipeline")
            pipeline_blob = rep.pipeline_blob.tostring()
            current_pipeline = cpp.Pipeline()
            current_pipeline.loadtxt(StringIO.StringIO(pipeline_blob),
                                     raise_on_error=True)
            logger.debug("Pipeline loaded")
            current_pipeline.add_listener(
                self.pipeline_listener.handle_event)
            current_preferences = rep.preferences
            self.pipelines_and_preferences[self.current_analysis_id] = (
                current_pipeline, current_preferences)
        else:
            # update preferences to match remote values
            cpprefs.set_preferences_from_dict(current_preferences)

        # Reset the listener's state
        self.pipeline_listener.reset()
        logger.debug("Getting initial measurements")
        # Fetch the initial measurements for this analysis if needed.
        current_measurements = self.initial_measurements.get(
            self.current_analysis_id)
        if current_measurements is None:
            logger.debug("Sending initial measurements request")
            rep = self.send(InitialMeasurementsRequest(
                self.current_analysis_id))
            logger.debug("Got initial measurements")
            current_measurements = \
                self.initial_measurements[self.current_analysis_id] = \
                cpmeas.load_measurements_from_buffer(rep.buf)
        else:
            logger.debug("Has initial measurements")
        # Make a copy of the measurements for writing during this job
        current_measurements = cpmeas.Measurements(copy=current_measurements)
        all_measurements.add(current_measurements)
        job_measurements.append(current_measurements)

        successful_image_set_numbers = []
        image_set_numbers = job.image_set_numbers
        worker_runs_post_group = job.worker_runs_post_group
        logger.info("Doing job: " + ",".join(map(str, image_set_numbers)))

        self.pipeline_listener.image_set_number = image_set_numbers[0]

        if not worker_runs_post_group:
            # Get the shared state from the first image set in this run.
            shared_dicts = self.send(
                SharedDictionaryRequest(self.current_analysis_id)).dictionaries
            assert len(shared_dicts) == len(current_pipeline.modules())
            for module, new_dict in zip(current_pipeline.modules(),
                                        shared_dicts):
                module.set_dictionary_for_worker(new_dict)

        # Run prepare group if this is the first image in the group. We do
        # this here (even if there's no grouping in the pipeline) to ensure
        # that any changes to the modules' shared state dictionaries get
        # propagated correctly.
        should_process = True
        if current_measurements[cpmeas.IMAGE, cpmeas.GROUP_INDEX,
                                image_set_numbers[0]] == 1:
            workspace = cpw.Workspace(current_pipeline, None, None, None,
                                      current_measurements, None, None)
            if not current_pipeline.prepare_group(
                    workspace,
                    current_measurements.get_grouping_keys(),
                    image_set_numbers):
                # exception handled elsewhere, possibly cancelling this run.
                should_process = False
            del workspace

        # process the images
        if should_process:
            abort = False
            for image_set_number in image_set_numbers:
                gc.collect()
                try:
                    self.pipeline_listener.image_set_number = \
                        image_set_number
                    current_pipeline.run_image_set(
                        current_measurements, image_set_number,
                        self.interaction_handler, self.display_handler,
                        self.cancel_handler)
                    if self.pipeline_listener.should_abort:
                        abort = True
                        break
                    elif self.pipeline_listener.should_skip:
                        # Report skipped image sets as successful so that
                        # analysis can complete.
                        # Report their measurements because some modules
                        # may have provided measurements before skipping.
                        pass
                    successful_image_set_numbers.append(image_set_number)
                    # Send an indication that the image set finished
                    # successfully.
                    if send_dictionary:
                        # The jobserver would like a copy of our modules'
                        # run_state dictionaries.
                        ws = cpw.Workspace(current_pipeline, None, None,
                                           None, current_measurements,
                                           None, None)
                        dicts = [m.get_dictionary_for_worker()
                                 for m in current_pipeline.modules()]
                        req = ImageSetSuccessWithDictionary(
                            self.current_analysis_id,
                            image_set_number=image_set_number,
                            shared_dicts=dicts)
                    else:
                        req = ImageSetSuccess(
                            self.current_analysis_id,
                            image_set_number=image_set_number)
                    rep = self.send(req)
                except cpp.CancelledException:
                    logging.info("Aborting job after cancellation")
                    abort = True
                except Exception:
                    try:
                        logging.error("Error in pipeline", exc_info=True)
                        if self.handle_exception(
                                image_set_number=image_set_number) == ED_STOP:
                            abort = True
                            break
                    except:
                        logging.error(
                            "Error in handling of pipeline exception",
                            exc_info=True)
                        # this is bad. We can't handle nested exceptions
                        # remotely so we just fail on this run.
                        abort = True

            if abort:
                current_measurements.close()
                job_measurements.remove(current_measurements)
                return

            if worker_runs_post_group:
                workspace = cpw.Workspace(current_pipeline, None,
                                          current_measurements, None,
                                          current_measurements, None, None)
                workspace.interaction_handler = self.interaction_handler
                workspace.cancel_handler = self.cancel_handler
                workspace.post_group_display_handler = \
                    self.post_group_display_handler
                # There might be an exception in this call, but it will be
                # handled elsewhere, and there's nothing we can do for it
                # here.
                current_pipeline.post_group(
                    workspace, current_measurements.get_grouping_keys())

        # send measurements back to server
        req = MeasurementsReport(self.current_analysis_id,
                                 buf=current_measurements.file_contents(),
                                 image_set_numbers=image_set_numbers)
        rep = self.send(req)
    except cpp.CancelledException:
        # Main thread received shutdown signal
        raise
    except Exception:
        logging.error("Error in worker", exc_info=True)
        if self.handle_exception() == ED_STOP:
            raise cpp.CancelledException(
                "Cancelling after user-requested stop")
    finally:
        # Clean up any measurements owned by us
        for m in job_measurements:
            m.close()
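# A minimal sketch of the grouping test do_job applies before
# prepare_group, assuming only the cpmeas constants referenced above; the
# function name is hypothetical. Workers receive whole groups, so the
# first image set of a job has GROUP_INDEX == 1 exactly when a new group
# begins and the modules' shared state must be (re)initialized.
def starts_new_group(measurements, first_image_set_number):
    # same lookup as the condition in do_job above
    return measurements[cpmeas.IMAGE, cpmeas.GROUP_INDEX,
                        first_image_set_number] == 1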
def do_job(self, job):
    '''Handle a work request to its completion

    job - WorkRequest
    '''
    import cellprofiler.pipeline as cpp
    job_measurements = []
    try:
        send_dictionary = job.wants_dictionary
        logger.info("Starting job")
        # Fetch the pipeline and preferences for this analysis if we don't have it
        current_pipeline, current_preferences = \
            self.pipelines_and_preferences.get(
                self.current_analysis_id, (None, None))
        if not current_pipeline:
            logger.debug("Fetching pipeline and preferences")
            rep = self.send(
                PipelinePreferencesRequest(self.current_analysis_id))
            logger.debug("Received pipeline and preferences response")
            preferences_dict = rep.preferences
            # update preferences to match remote values
            cpprefs.set_preferences_from_dict(preferences_dict)
            logger.debug("Loading pipeline")
            pipeline_blob = rep.pipeline_blob.tostring()
            current_pipeline = cpp.Pipeline()
            current_pipeline.loadtxt(StringIO.StringIO(pipeline_blob),
                                     raise_on_error=True)
            logger.debug("Pipeline loaded")
            current_pipeline.add_listener(
                self.pipeline_listener.handle_event)
            current_preferences = rep.preferences
            self.pipelines_and_preferences[self.current_analysis_id] = (
                current_pipeline, current_preferences)
        else:
            # update preferences to match remote values
            cpprefs.set_preferences_from_dict(current_preferences)

        # Reset the listener's state
        self.pipeline_listener.reset()
        logger.debug("Getting initial measurements")
        # Fetch the initial measurements for this analysis if needed.
        current_measurements = self.initial_measurements.get(
            self.current_analysis_id)
        if current_measurements is None:
            logger.debug("Sending initial measurements request")
            rep = self.send(
                InitialMeasurementsRequest(self.current_analysis_id))
            logger.debug("Got initial measurements")
            current_measurements = \
                self.initial_measurements[self.current_analysis_id] = \
                cpmeas.load_measurements_from_buffer(rep.buf)
        else:
            logger.debug("Has initial measurements")
        # Make a copy of the measurements for writing during this job
        current_measurements = cpmeas.Measurements(
            copy=current_measurements)
        all_measurements.add(current_measurements)
        job_measurements.append(current_measurements)

        successful_image_set_numbers = []
        image_set_numbers = job.image_set_numbers
        worker_runs_post_group = job.worker_runs_post_group
        logger.info("Doing job: " + ",".join(map(str, image_set_numbers)))

        self.pipeline_listener.image_set_number = image_set_numbers[0]

        if not worker_runs_post_group:
            # Get the shared state from the first image set in this run.
            shared_dicts = self.send(
                SharedDictionaryRequest(
                    self.current_analysis_id)).dictionaries
            assert len(shared_dicts) == len(current_pipeline.modules())
            for module, new_dict in zip(current_pipeline.modules(),
                                        shared_dicts):
                module.set_dictionary_for_worker(new_dict)

        # Run prepare group if this is the first image in the group. We do
        # this here (even if there's no grouping in the pipeline) to ensure
        # that any changes to the modules' shared state dictionaries get
        # propagated correctly.
        should_process = True
        if current_measurements[cpmeas.IMAGE, cpmeas.GROUP_INDEX,
                                image_set_numbers[0]] == 1:
            workspace = cpw.Workspace(current_pipeline, None, None, None,
                                      current_measurements, None, None)
            if not current_pipeline.prepare_group(
                    workspace,
                    current_measurements.get_grouping_keys(),
                    image_set_numbers):
                # exception handled elsewhere, possibly cancelling this run.
                should_process = False
            del workspace

        # process the images
        if should_process:
            abort = False
            for image_set_number in image_set_numbers:
                try:
                    self.pipeline_listener.image_set_number = \
                        image_set_number
                    last_workspace = current_pipeline.run_image_set(
                        current_measurements, image_set_number,
                        self.interaction_handler, self.display_handler,
                        self.cancel_handler)
                    if self.pipeline_listener.should_abort:
                        abort = True
                        break
                    elif self.pipeline_listener.should_skip:
                        # Report skipped image sets as successful so that
                        # analysis can complete.
                        # Report their measurements because some modules
                        # may have provided measurements before skipping.
                        pass
                    successful_image_set_numbers.append(image_set_number)
                    # Send an indication that the image set finished
                    # successfully.
                    if send_dictionary:
                        # The jobserver would like a copy of our modules'
                        # run_state dictionaries.
                        dicts = [m.get_dictionary_for_worker()
                                 for m in current_pipeline.modules()]
                        req = ImageSetSuccessWithDictionary(
                            self.current_analysis_id,
                            image_set_number=image_set_number,
                            shared_dicts=dicts)
                    else:
                        req = ImageSetSuccess(
                            self.current_analysis_id,
                            image_set_number=image_set_number)
                    rep = self.send(req)
                except cpp.CancelledException:
                    logging.info("Aborting job after cancellation")
                    abort = True
                except Exception:
                    try:
                        logging.error("Error in pipeline", exc_info=True)
                        if self.handle_exception(
                                image_set_number=image_set_number) == ED_STOP:
                            abort = True
                            break
                    except:
                        logging.error(
                            "Error in handling of pipeline exception",
                            exc_info=True)
                        # this is bad. We can't handle nested exceptions
                        # remotely so we just fail on this run.
                        abort = True

            if abort:
                current_measurements.close()
                job_measurements.remove(current_measurements)
                return

            if worker_runs_post_group:
                last_workspace.interaction_handler = \
                    self.interaction_handler
                last_workspace.cancel_handler = self.cancel_handler
                last_workspace.post_group_display_handler = \
                    self.post_group_display_handler
                # There might be an exception in this call, but it will be
                # handled elsewhere, and there's nothing we can do for it
                # here.
                current_pipeline.post_group(
                    last_workspace,
                    current_measurements.get_grouping_keys())
                del last_workspace

        # send measurements back to server
        req = MeasurementsReport(self.current_analysis_id,
                                 buf=current_measurements.file_contents(),
                                 image_set_numbers=image_set_numbers)
        rep = self.send(req)
    except cpp.CancelledException:
        # Main thread received shutdown signal
        raise
    except Exception:
        logging.error("Error in worker", exc_info=True)
        if self.handle_exception() == ED_STOP:
            raise cpp.CancelledException(
                "Cancelling after user-requested stop")
    finally:
        # Clean up any measurements owned by us
        for m in job_measurements:
            m.close()
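# A minimal sketch isolating the success-report branch of do_job, assuming
# the message classes used above (ImageSetSuccess,
# ImageSetSuccessWithDictionary) and a worker whose send() does a blocking
# request/reply; the function name and parameters are hypothetical. The
# modules' run-state dictionaries travel back to the jobserver only when
# the job asked for them via wants_dictionary.
def report_image_set_success(worker, analysis_id, image_set_number,
                             pipeline, wants_dictionary):
    if wants_dictionary:
        # attach each module's shared run-state dictionary to the reply
        dicts = [m.get_dictionary_for_worker()
                 for m in pipeline.modules()]
        req = ImageSetSuccessWithDictionary(
            analysis_id, image_set_number=image_set_number,
            shared_dicts=dicts)
    else:
        req = ImageSetSuccess(analysis_id,
                              image_set_number=image_set_number)
    # block until the jobserver acknowledges the report
    return worker.send(req)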
def test_01_04_old_users_directory(self):
    gotcha = "c:\\users\\default"
    cpprefs.set_preferences_from_dict({})  # clear cache
    cpprefs.get_config().Write("test_preferences", gotcha)
    result = cpprefs.config_read("test_preferences")
    self.assertEqual(result, gotcha)