def test_01_03_load_v02():
    data = r"""CellProfiler Pipeline: http://www.cellprofiler.org
Version:3
DateRevision:20130522170932
ModuleCount:1
HasImagePlaneDetails:False

SmoothMultichannel:[module_num:1|svn_version:\'Unknown\'|variable_revision_number:2|show_window:False|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True]
    Select the input image:InputImage
    Name the output image:OutputImage
    Select smoothing method:Median Filter
    Calculate artifact diameter automatically?:Yes
    Typical artifact diameter, in pixels:19.0
    Edge intensity difference:0.2
    Clip intensity at 0 and 1:No
"""
    pipeline = cpp.Pipeline()
    cpmodules.fill_modules()
    cpmodules.add_module_for_tst(S.SmoothMultichannel)
    pipeline.load(io.StringIO(data))
    assert len(pipeline.modules()) == 1
    smooth = pipeline.modules()[0]
    assert isinstance(smooth, S.SmoothMultichannel)
    assert smooth.image_name == "InputImage"
    assert smooth.filtered_image_name == "OutputImage"
    assert smooth.wants_automatic_object_size
    assert smooth.object_size == 19
    assert smooth.smoothing_method == S.MEDIAN_FILTER
    assert not smooth.clip
def make_workspace(self, images):
    """Make a workspace"""
    module = S.StackImages()
    pipeline = cpp.Pipeline()
    object_set = cpo.ObjectSet()
    image_set_list = cpi.ImageSetList()
    image_set = image_set_list.get_image_set(0)
    workspace = cpw.Workspace(
        pipeline,
        module,
        image_set,
        object_set,
        cpmeas.Measurements(),
        image_set_list,
    )
    # setup the input images
    names = [INPUT_IMAGE_BASENAME + str(i) for i, img in enumerate(images)]
    for img, nam in zip(images, names):
        image_set.add(nam, cpi.Image(img))
    # setup the module settings
    module.stack_image_name.value = OUTPUT_IMAGE_NAME
    nimgs = len(images)
    while len(module.stack_channels) < nimgs:
        module.add_stack_channel_cb()
    for sc, imname in zip(module.stack_channels, names):
        sc.image_name.value = imname
    return workspace, module
def test_01_06_run_pipeline(self):
    import cellprofiler_core.pipeline as cpp
    import cellprofiler_core.module as cpm

    def callback(caller, event):
        self.assertFalse(
            isinstance(event, (cpp.event.LoadException, cpp.event.RunException))
        )

    pipeline = cpp.Pipeline()
    pipeline.add_listener(callback)
    fly_pipe = get_test_resources_directory("../ExampleFlyURL.cppipe")
    pipeline.load(fly_pipe)
    while True:
        removed_something = False
        for module in reversed(pipeline.modules()):
            self.assertTrue(isinstance(module, cpm.Module))
            if module.module_name in (
                "SaveImages",
                "CalculateStatistics",
                "ExportToSpreadsheet",
                "ExportToDatabase",
            ):
                pipeline.remove_module(module.module_num)
                removed_something = True
                break
        if not removed_something:
            break
    for module in pipeline.modules():
        module.show_window = False
    m = pipeline.run(image_set_end=1)
def test_01_05_load_pipeline(self):
    import cellprofiler_core.pipeline as cpp

    def callback(caller, event):
        self.assertFalse(isinstance(event, cpp.event.LoadException))

    pipeline = cpp.Pipeline()
    pipeline.add_listener(callback)
    fly_pipe = get_test_resources_directory("../ExampleFlyURL.cppipe")
    pipeline.load(fly_pipe)
def load_pipeline(pipeline_path):
    pipeline = cpp.Pipeline()
    pipeline.load(pipeline_path)
    # Remove first 4 modules: Images, Metadata, NamesAndTypes, Groups...
    # (replaced by InjectImage module below)
    for i in range(4):
        print('Remove module: ', pipeline.modules()[0].module_name)
        pipeline.remove_module(1)
    print('Pipeline modules:')
    for module in pipeline.modules():
        print(module.module_num, module.module_name)
    return pipeline
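# Usage sketch (not from the original source): one way the pipeline returned by
# load_pipeline() could be fed with an in-memory image via InjectImage, which the
# comment above names as the replacement for the removed input modules. The import
# path cellprofiler_core.modules.injectimage, the InjectImage(image_name, image)
# constructor, and the module_num assignment are assumptions about the installed
# CellProfiler version; verify them before relying on this.
def run_pipeline_on_array(pipeline_path, image_name, image_array):
    from cellprofiler_core.modules.injectimage import InjectImage  # assumed path

    pipeline = load_pipeline(pipeline_path)
    inject = InjectImage(image_name, image_array)  # assumed constructor
    inject.module_num = 1  # place ahead of the remaining analysis modules
    pipeline.add_module(inject)
    return pipeline.run()  # returns the Measurements produced by the run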
def make_workspace(image, mask):
    """Make a workspace for testing SmoothMultichannel"""
    module = S.SmoothMultichannel()
    pipeline = cpp.Pipeline()
    object_set = cpo.ObjectSet()
    image_set_list = cpi.ImageSetList()
    image_set = image_set_list.get_image_set(0)
    workspace = cpw.Workspace(
        pipeline,
        module,
        image_set,
        object_set,
        cpmeas.Measurements(),
        image_set_list,
    )
    image_set.add(INPUT_IMAGE_NAME, cpi.Image(image, mask, scale=1))
    module.image_name.value = INPUT_IMAGE_NAME
    module.filtered_image_name.value = OUTPUT_IMAGE_NAME
    return workspace, module
def test_01_05_load_pipeline(self):
    import cellprofiler_core.pipeline as cpp

    def callback(caller, event):
        self.assertFalse(isinstance(event, cpp.event.LoadException))

    pipeline = cpp.Pipeline()
    pipeline.add_listener(callback)
    try:
        fd = urlopen(self.fly_url)
    except IOError as e:

        def bad_url(e=e):
            raise e

        unittest.expectedFailure(bad_url)()
    pipeline.load(fd)
    fd.close()
def make_workspace(image, outlier_percentile):
    """Make a workspace"""
    module = C.ClipRange()
    pipeline = cpp.Pipeline()
    object_set = cpo.ObjectSet()
    image_set_list = cpi.ImageSetList()
    image_set = image_set_list.get_image_set(0)
    workspace = cpw.Workspace(
        pipeline,
        module,
        image_set,
        object_set,
        cpmeas.Measurements(),
        image_set_list,
    )
    # setup the input image
    image_set.add(INPUT_IMAGE_NAME, cpi.Image(image))
    # setup the module settings
    module.x_name.value = INPUT_IMAGE_NAME
    module.y_name.value = OUTPUT_IMAGE_NAME
    module.outlier_percentile.value = outlier_percentile
    return workspace, module
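# Minimal smoke-test sketch (not from the original source) showing how the
# make_workspace helper above is typically consumed: run the module and read the
# output image back from the image set. numpy, the test name, and the shape-only
# assertion are illustrative assumptions; the real tests assert the actual
# clipping behaviour.
def test_clip_range_runs():
    import numpy as np

    image = np.random.uniform(size=(20, 30)).astype(np.float32)
    workspace, module = make_workspace(image, 0.99)
    module.run(workspace)
    result = workspace.image_set.get_image(OUTPUT_IMAGE_NAME)
    # The clipped output keeps the input shape; value checks are omitted here.
    assert result.pixel_data.shape == image.shape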
def merge_files(destination, sources, force_headless=False):
    is_headless = force_headless or get_headless()
    if not is_headless:
        import wx
    if len(sources) == 0:
        return
    if not is_headless:
        progress = wx.ProgressDialog(
            "Writing " + destination,
            "Loading " + sources[0],
            maximum=len(sources) * 4 + 1,
            style=wx.PD_CAN_ABORT
            | wx.PD_APP_MODAL
            | wx.PD_ELAPSED_TIME
            | wx.PD_REMAINING_TIME,
        )
    count = 0
    try:
        pipeline = cpp.Pipeline()
        has_error = [False]

        def callback(caller, event):
            if isinstance(event, cpp.event.LoadException):
                wx.MessageBox(
                    message="Could not load %s: %s" % (sources[0], event.error),
                    caption="Failed to load %s" % sources[0],
                )
                has_error[0] = True

        pipeline.add_listener(callback)
        pipeline.load(sources[0])
        if has_error[0]:
            return
        if destination.lower().endswith(".h5"):
            mdest = cpmeas.Measurements(filename=destination, multithread=False)
            h5_dest = True
        else:
            mdest = cpmeas.Measurements(multithread=False)
            h5_dest = False
        for source in sources:
            if not is_headless:
                count += 1
                keep_going, skip = progress.Update(count, "Loading " + source)
                if not keep_going:
                    return
            if h5py.is_hdf5(source):
                msource = cpmeas.Measurements(
                    filename=source, mode="r", multithread=False
                )
            else:
                msource = cpmeas.load_measurements(source)
            dest_image_numbers = mdest.get_image_numbers()
            source_image_numbers = msource.get_image_numbers()
            if len(dest_image_numbers) == 0 or len(source_image_numbers) == 0:
                offset_source_image_numbers = source_image_numbers
            else:
                offset_source_image_numbers = (
                    np.max(dest_image_numbers)
                    - np.min(source_image_numbers)
                    + source_image_numbers
                    + 1
                )
            for object_name in msource.get_object_names():
                if object_name in mdest.get_object_names():
                    destfeatures = mdest.get_feature_names(object_name)
                else:
                    destfeatures = []
                for feature in msource.get_feature_names(object_name):
                    if object_name == cpmeas.EXPERIMENT:
                        if not mdest.has_feature(object_name, feature):
                            src_value = msource.get_experiment_measurement(feature)
                            mdest.add_experiment_measurement(feature, src_value)
                        continue
                    src_values = msource.get_measurement(
                        object_name,
                        feature,
                        image_set_number=source_image_numbers,
                    )
                    mdest[
                        object_name, feature, offset_source_image_numbers
                    ] = src_values
                destset = set(destfeatures)
            if not is_headless:
                keep_going, skip = progress.Update(
                    count + 1, "Saving to " + destination
                )
                if not keep_going:
                    return
        if not h5_dest:
            pipeline.save_measurements(destination, mdest)
    finally:
        if not is_headless:
            progress.Destroy()
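# Usage sketch (not from the original source): merging several per-batch
# measurement files into a single HDF5 destination without a GUI. The file names
# are hypothetical; force_headless=True keeps the wx progress dialog out of the
# path, as the branches above show.
if __name__ == "__main__":
    merge_files(
        "combined_measurements.h5",
        ["batch_1.h5", "batch_2.h5", "batch_3.h5"],
        force_headless=True,
    )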
def do_job(self, job):
    """Handle a work request to its completion

    job - request.Work
    """
    import cellprofiler_core.pipeline as cpp

    job_measurements = []
    try:
        send_dictionary = job.wants_dictionary
        logging.info("Starting job")
        # Fetch the pipeline and preferences for this analysis if we don't have it
        current_pipeline, current_preferences = self.pipelines_and_preferences.get(
            self.current_analysis_id, (None, None)
        )
        if not current_pipeline:
            logging.debug("Fetching pipeline and preferences")
            rep = self.send(PipelinePreferences(self.current_analysis_id))
            logging.debug("Received pipeline and preferences response")
            preferences_dict = rep.preferences
            # update preferences to match remote values
            set_preferences_from_dict(preferences_dict)
            logging.debug("Loading pipeline")
            current_pipeline = cpp.Pipeline()
            pipeline_chunks = rep.pipeline_blob.tolist()
            pipeline_io = io.StringIO("".join(pipeline_chunks))
            current_pipeline.loadtxt(pipeline_io, raise_on_error=True)
            logging.debug("Pipeline loaded")
            current_pipeline.add_listener(self.pipeline_listener.handle_event)
            current_preferences = rep.preferences
            self.pipelines_and_preferences[self.current_analysis_id] = (
                current_pipeline,
                current_preferences,
            )
        else:
            # update preferences to match remote values
            set_preferences_from_dict(current_preferences)
        # Reset the listener's state
        self.pipeline_listener.reset()
        logging.debug("Getting initial measurements")
        # Fetch the path to the initial measurements if needed.
        current_measurements = self.initial_measurements.get(
            self.current_analysis_id
        )
        if current_measurements is None:
            logging.debug("Sending initial measurements request")
            rep = self.send(InitialMeasurements(self.current_analysis_id))
            logging.debug("Got initial measurements")
            current_measurements = self.initial_measurements[
                self.current_analysis_id
            ] = load_measurements_from_buffer(rep.buf)
        else:
            logging.debug("Has initial measurements")
        # Make a copy of the measurements for writing during this job
        current_measurements = Measurements(copy=current_measurements)
        all_measurements.add(current_measurements)
        job_measurements.append(current_measurements)
        successful_image_set_numbers = []
        image_set_numbers = job.image_set_numbers
        worker_runs_post_group = job.worker_runs_post_group
        logging.info("Doing job: " + ",".join(map(str, image_set_numbers)))
        self.pipeline_listener.image_set_number = image_set_numbers[0]
        if not worker_runs_post_group:
            # Get the shared state from the first imageset in this run.
            shared_dicts = self.send(
                SharedDictionary(self.current_analysis_id)
            ).dictionaries
            assert len(shared_dicts) == len(current_pipeline.modules())
            for module, new_dict in zip(current_pipeline.modules(), shared_dicts):
                module.set_dictionary_for_worker(new_dict)
        # Run prepare group if this is the first image in the group.  We do
        # this here (even if there's no grouping in the pipeline) to ensure
        # that any changes to the modules' shared state dictionaries get
        # propagated correctly.
        should_process = True
        if current_measurements["Image", "Group_Index", image_set_numbers[0]] == 1:
            workspace = Workspace(
                current_pipeline, None, None, None, current_measurements, None, None
            )
            if not current_pipeline.prepare_group(
                workspace,
                current_measurements.get_grouping_keys(),
                image_set_numbers,
            ):
                # exception handled elsewhere, possibly cancelling this run.
                should_process = False
            del workspace
        # process the images
        if should_process:
            abort = False
            for image_set_number in image_set_numbers:
                try:
                    self.pipeline_listener.image_set_number = image_set_number
                    last_workspace = current_pipeline.run_image_set(
                        current_measurements,
                        image_set_number,
                        self.interaction_handler,
                        self.display_handler,
                        self.cancel_handler,
                    )
                    if self.pipeline_listener.should_abort:
                        abort = True
                        break
                    elif self.pipeline_listener.should_skip:
                        # Report skipped image sets as successful so that
                        # analysis can complete.
                        # Report their measurements because some modules
                        # may have provided measurements before skipping.
                        pass
                    successful_image_set_numbers.append(image_set_number)
                    # Send an indication that the image set finished successfully.
                    if send_dictionary:
                        # The jobserver would like a copy of our modules'
                        # run_state dictionaries.
                        dicts = [
                            m.get_dictionary_for_worker()
                            for m in current_pipeline.modules()
                        ]
                        req = ImageSetSuccessWithDictionary(
                            self.current_analysis_id,
                            image_set_number=image_set_number,
                            shared_dicts=dicts,
                        )
                    else:
                        req = ImageSetSuccess(
                            self.current_analysis_id,
                            image_set_number=image_set_number,
                        )
                    rep = self.send(req)
                except CancelledException:
                    logging.info("Aborting job after cancellation")
                    abort = True
                except Exception as e:
                    try:
                        logging.error("Error in pipeline", exc_info=True)
                        if (
                            self.handle_exception(image_set_number=image_set_number)
                            == ED_STOP
                        ):
                            abort = True
                            break
                    except:
                        logging.error(
                            "Error in handling of pipeline exception", exc_info=True
                        )
                        # this is bad.  We can't handle nested exceptions
                        # remotely so we just fail on this run.
                        abort = True
            if abort:
                current_measurements.close()
                job_measurements.remove(current_measurements)
                return
            if worker_runs_post_group:
                last_workspace.interaction_handler = self.interaction_handler
                last_workspace.cancel_handler = self.cancel_handler
                last_workspace.post_group_display_handler = (
                    self.post_group_display_handler
                )
                # There might be an exception in this call, but it will be
                # handled elsewhere, and there's nothing we can do for it
                # here.
                current_pipeline.post_group(
                    last_workspace, current_measurements.get_grouping_keys()
                )
                del last_workspace
        # send measurements back to server
        req = MeasurementsReport(
            self.current_analysis_id,
            buf=current_measurements.file_contents(),
            image_set_numbers=image_set_numbers,
        )
        rep = self.send(req)
    except CancelledException:
        # Main thread received shutdown signal
        raise
    except Exception:
        logging.error("Error in worker", exc_info=True)
        if self.handle_exception() == ED_STOP:
            raise CancelledException("Cancelling after user-requested stop")
    finally:
        # Clean up any measurements owned by us
        for m in job_measurements:
            m.close()